From 2779c60f42eb9290b57f24b6fe7c5a4706320bba Mon Sep 17 00:00:00 2001 From: JackDandy Date: Fri, 10 Feb 2023 14:15:50 +0000 Subject: [PATCH 01/21] Change remove calls to legacy py2 fix encoding function. Change various code cleanups, e.g. replace CamelCase with pythonic casing. --- CHANGES.md | 9 +- gui/slick/interfaces/default/home.tmpl | 4 +- gui/slick/interfaces/default/manage.tmpl | 28 +- lib/dateutil/zoneinfo/__init__.py | 8 +- lib/encodingKludge.py | 3 +- lib/exceptions_helper.py | 81 ++---- lib/imdbpie/auth.py | 3 +- lib/sg_helpers.py | 95 ++++--- lib/subliminal/videos.py | 13 +- sickgear.py | 6 +- sickgear/__init__.py | 18 +- sickgear/anime.py | 4 +- sickgear/auto_post_processer.py | 7 +- sickgear/browser.py | 27 +- sickgear/common.py | 14 +- sickgear/config.py | 9 +- sickgear/databases/mainDB.py | 14 +- sickgear/db.py | 18 +- sickgear/failedProcessor.py | 2 +- sickgear/helpers.py | 154 ++++++----- sickgear/image_cache.py | 55 ++-- sickgear/indexers/indexer_api.py | 6 +- sickgear/logger.py | 8 +- sickgear/metadata/generic.py | 134 +++++----- sickgear/metadata/kodi.py | 14 +- sickgear/metadata/mediabrowser.py | 30 +-- sickgear/metadata/ps3.py | 6 +- sickgear/metadata/tivo.py | 20 +- sickgear/metadata/wdtv.py | 11 +- sickgear/metadata/xbmc.py | 6 +- sickgear/name_parser/parser.py | 8 +- sickgear/naming.py | 5 +- sickgear/network_timezones.py | 19 +- sickgear/notifiers/__init__.py | 4 +- sickgear/notifiers/plex.py | 3 +- sickgear/notifiers/pytivo.py | 4 +- sickgear/notifiers/synoindex.py | 9 +- sickgear/notifiers/synologynotifier.py | 4 +- sickgear/notifiers/telegram.py | 6 +- sickgear/notifiers/xbmc.py | 13 +- sickgear/nzbSplitter.py | 8 +- sickgear/piper.py | 20 +- sickgear/postProcessor.py | 73 +++-- sickgear/processTV.py | 158 ++++++----- sickgear/properFinder.py | 4 +- sickgear/providers/filesharingtalk.py | 4 +- sickgear/providers/generic.py | 23 +- sickgear/providers/omgwtfnzbs.py | 4 +- sickgear/providers/tokyotoshokan.py | 4 +- sickgear/scene_exceptions.py | 22 +- sickgear/search.py | 6 +- sickgear/show_name_helpers.py | 91 ++++--- sickgear/show_queue.py | 173 ++++++------ sickgear/show_updater.py | 21 +- sickgear/subtitles.py | 59 ++-- sickgear/traktChecker.py | 5 +- sickgear/tv.py | 132 +++++---- sickgear/ui.py | 4 +- sickgear/version_checker.py | 10 +- sickgear/webapi.py | 50 ++-- sickgear/webserve.py | 325 +++++++++++------------ tests/network_timezone_tests.py | 8 +- tests/scene_helpers_tests.py | 2 +- 63 files changed, 962 insertions(+), 1126 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index dd751c0f..9e068fbd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,9 @@ -### 3.27.2 (2023-02-10 19:25:00 UTC) +### 3.28.0 (2023-xx-xx xx:xx:00 UTC) + +* Change remove calls to legacy py2 fix encoding function + + +### 3.27.2 (2023-02-10 19:25:00 UTC) * Fix revert update * Fix installations that don't have previously saved cleanup lock files @@ -710,7 +715,7 @@ * Change allow Python 3.8.10 and 3.9.5 * Remove PiSexy provider -* Fix refreshShow, prevent another refresh of show if already in queue and not forced +* Fix refresh_show, prevent another refresh of show if already in queue and not forced * Fix webapi set scene season * Fix set path in all_tests for py2 * Fix webapi exception if no backlog was done before (CMD_SickGearCheckScheduler) diff --git a/gui/slick/interfaces/default/home.tmpl b/gui/slick/interfaces/default/home.tmpl index 36615d4c..f37140cf 100644 --- a/gui/slick/interfaces/default/home.tmpl +++ b/gui/slick/interfaces/default/home.tmpl @@ -99,7 +99,7 @@
## - #for $curLoadingShow in $sickgear.show_queue_scheduler.action.loadingShowList + #for $curLoadingShow in $sickgear.show_queue_scheduler.action.loading_showlist ## #if $curLoadingShow.show_obj != None and $curLoadingShow.show_obj in $sg_str('showList') #continue @@ -292,7 +292,7 @@ - #for $curLoadingShow in $sickgear.show_queue_scheduler.action.loadingShowList + #for $curLoadingShow in $sickgear.show_queue_scheduler.action.loading_showlist #if $curLoadingShow.show_obj != None and $curLoadingShow.show_obj in $sg_str('showList') #continue #end if diff --git a/gui/slick/interfaces/default/manage.tmpl b/gui/slick/interfaces/default/manage.tmpl index 7433352f..a8cfe82a 100644 --- a/gui/slick/interfaces/default/manage.tmpl +++ b/gui/slick/interfaces/default/manage.tmpl @@ -188,37 +188,37 @@ $xsrf_form_html #set $show_size = $max if not $show_loc else $get_size($show_loc) #set $option_state = '' ## - #set $curUpdate_disabled = $sickgear.show_queue_scheduler.action.isBeingUpdated($cur_show_obj)\ - or $sickgear.show_queue_scheduler.action.isInUpdateQueue($cur_show_obj) + #set $curUpdate_disabled = $sickgear.show_queue_scheduler.action.is_being_updated($cur_show_obj)\ + or $sickgear.show_queue_scheduler.action.is_in_update_queue($cur_show_obj) #set $tip = ' title="Update%s"' % ('', $disabled_inprogress_tip)[$curUpdate_disabled] #set $curUpdate = ($tip, $option_state % (('', $disabled)[$curUpdate_disabled], 'update', $tip)) ## - #set $curRefresh_disabled = $sickgear.show_queue_scheduler.action.isBeingRefreshed($cur_show_obj)\ - or $sickgear.show_queue_scheduler.action.isInRefreshQueue($cur_show_obj) + #set $curRefresh_disabled = $sickgear.show_queue_scheduler.action.is_being_refreshed($cur_show_obj)\ + or $sickgear.show_queue_scheduler.action.is_in_refresh_queue($cur_show_obj) #set $tip = ' title="Rescan%s"' % ('', $disabled_inprogress_tip)[$curRefresh_disabled] #set $curRefresh = ($tip, $option_state % (('', $disabled)[$curRefresh_disabled], 'refresh', $tip)) ## - #set $curRename_disabled = $sickgear.show_queue_scheduler.action.isBeingRenamed($cur_show_obj)\ - or $sickgear.show_queue_scheduler.action.isInRenameQueue($cur_show_obj) + #set $curRename_disabled = $sickgear.show_queue_scheduler.action.is_being_renamed($cur_show_obj)\ + or $sickgear.show_queue_scheduler.action.is_in_rename_queue($cur_show_obj) #set $tip = ' title="Rename%s"' % ('', $disabled_inprogress_tip)[$curRename_disabled] #set $curRename = ($tip, $option_state % (('', $disabled)[$curRename_disabled], 'rename', $tip)) ## #set $subtitles_disabled = not $cur_show_obj.subtitles\ - or $sickgear.show_queue_scheduler.action.isBeingSubtitled($cur_show_obj)\ - or $sickgear.show_queue_scheduler.action.isInSubtitleQueue($cur_show_obj) + or $sickgear.show_queue_scheduler.action.is_being_subtitled($cur_show_obj)\ + or $sickgear.show_queue_scheduler.action.is_in_subtitle_queue($cur_show_obj) #set $tip = (' title="Search subtitle"', (' title="Search subtitle%s"' % $disabled_inprogress_tip, $disabled_subtitles_tip)[not $cur_show_obj.subtitles])[$subtitles_disabled] #set $curSubtitle = ($tip, $option_state % (('', $disabled)[$subtitles_disabled], 'subtitle', $tip)) ## - #set $curDelete_disabled = $sickgear.show_queue_scheduler.action.isBeingRenamed($cur_show_obj)\ - or $sickgear.show_queue_scheduler.action.isInRenameQueue($cur_show_obj)\ - or $sickgear.show_queue_scheduler.action.isInRefreshQueue($cur_show_obj) + #set $curDelete_disabled = $sickgear.show_queue_scheduler.action.is_being_renamed($cur_show_obj)\ + or 
$sickgear.show_queue_scheduler.action.is_in_rename_queue($cur_show_obj)\ + or $sickgear.show_queue_scheduler.action.is_in_refresh_queue($cur_show_obj) #set $tip = ' title="Delete%s"' % ('', $disabled_inprogress_tip)[$curDelete_disabled] #set $curDelete = ($tip, $option_state % (('', $disabled)[$curDelete_disabled], 'delete', $tip)) ## - #set $curRemove_disabled = $sickgear.show_queue_scheduler.action.isBeingRenamed($cur_show_obj)\ - or $sickgear.show_queue_scheduler.action.isInRenameQueue($cur_show_obj)\ - or $sickgear.show_queue_scheduler.action.isInRefreshQueue($cur_show_obj) + #set $curRemove_disabled = $sickgear.show_queue_scheduler.action.is_being_renamed($cur_show_obj)\ + or $sickgear.show_queue_scheduler.action.is_in_rename_queue($cur_show_obj)\ + or $sickgear.show_queue_scheduler.action.is_in_refresh_queue($cur_show_obj) #set $tip = ' title="Remove%s"' % ('', $disabled_inprogress_tip)[$curRemove_disabled] #set $curRemove = ($tip, $option_state % (('', $disabled)[$curRemove_disabled], 'remove', $tip)) diff --git a/lib/dateutil/zoneinfo/__init__.py b/lib/dateutil/zoneinfo/__init__.py index 5628bfc3..27cc9df4 100644 --- a/lib/dateutil/zoneinfo/__init__.py +++ b/lib/dateutil/zoneinfo/__init__.py @@ -9,8 +9,6 @@ from io import BytesIO from dateutil.tz import tzfile as _tzfile -# noinspection PyPep8Naming -import encodingKludge as ek import sickgear __all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"] @@ -27,10 +25,10 @@ class tzfile(_tzfile): def getzoneinfofile_stream(): try: # return BytesIO(get_data(__name__, ZONEFILENAME)) - zonefile = ek.ek(os.path.join, sickgear.ZONEINFO_DIR, ZONEFILENAME) - if not ek.ek(os.path.isfile, zonefile): + zonefile = os.path.join(sickgear.ZONEINFO_DIR, ZONEFILENAME) + if not os.path.isfile(zonefile): warnings.warn('Falling back to included zoneinfo file') - zonefile = ek.ek(os.path.join, ek.ek(os.path.dirname, __file__), ZONEFILENAME) + zonefile = os.path.join(os.path.dirname(__file__), ZONEFILENAME) with open(zonefile, 'rb') as f: return BytesIO(f.read()) except IOError as e: # TODO switch to FileNotFoundError? 
diff --git a/lib/encodingKludge.py b/lib/encodingKludge.py index 1dcb6f67..7946bf91 100644 --- a/lib/encodingKludge.py +++ b/lib/encodingKludge.py @@ -39,7 +39,6 @@ def set_sys_encoding(): :return: The encoding that is set """ - sys_encoding = None should_exit = False try: locale.setlocale(locale.LC_ALL, '') @@ -48,7 +47,7 @@ def set_sys_encoding(): try: sys_encoding = locale.getpreferredencoding() except (locale.Error, IOError): - pass + sys_encoding = None # For OSes that are poorly configured I'll just randomly force UTF-8 if not sys_encoding or sys_encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'): diff --git a/lib/exceptions_helper.py b/lib/exceptions_helper.py index 1232fe0b..e0628bbf 100644 --- a/lib/exceptions_helper.py +++ b/lib/exceptions_helper.py @@ -16,9 +16,6 @@ from six import PY2, string_types -if PY2: - from encodingKludge import fixStupidEncodings - # noinspection PyUnreachableCode if False: from typing import AnyStr @@ -28,128 +25,100 @@ def ex(e): # type: (BaseException) -> AnyStr """Returns a unicode string from the exception text if it exists""" - if not PY2: - return str(e) - - e_message = u'' - - if not e or not e.args: - return e_message - - for arg in e.args: - - if None is not arg: - if isinstance(arg, string_types): - fixed_arg = fixStupidEncodings(arg, True) - - else: - try: - fixed_arg = u'error ' + fixStupidEncodings(str(arg), True) - - except (BaseException, Exception): - fixed_arg = None - - if fixed_arg: - if not e_message: - e_message = fixed_arg - - else: - e_message = e_message + ' : ' + fixed_arg - - return e_message + return str(e) -class SickBeardException(Exception): +class SickGearException(Exception): """Generic SickGear Exception - should never be thrown, only subclassed""" -class ConfigErrorException(SickBeardException): +class ConfigErrorException(SickGearException): """Error in the config file""" -class LaterException(SickBeardException): +class LaterException(SickGearException): """Something bad happened that I'll make a real exception for later""" -class NoNFOException(SickBeardException): +class NoNFOException(SickGearException): """No NFO was found!""" -class NoShowDirException(SickBeardException): +class NoShowDirException(SickGearException): """Unable to find the show's directory""" -class FileNotFoundException(SickBeardException): +class FileNotFoundException(SickGearException): """The specified file doesn't exist""" -class MultipleDBEpisodesException(SickBeardException): +class MultipleDBEpisodesException(SickGearException): """Found multiple episodes in the DB! Must fix DB first""" -class MultipleDBShowsException(SickBeardException): +class MultipleDBShowsException(SickGearException): """Found multiple shows in the DB! Must fix DB first""" -class MultipleShowObjectsException(SickBeardException): +class MultipleShowObjectsException(SickGearException): """Found multiple objects for the same show! 
Something is very wrong""" -class WrongShowException(SickBeardException): +class WrongShowException(SickGearException): """The episode doesn't belong to the same show as its parent folder""" -class ShowNotFoundException(SickBeardException): +class ShowNotFoundException(SickGearException): """The show wasn't found on the Indexer""" -class EpisodeNotFoundException(SickBeardException): +class EpisodeNotFoundException(SickGearException): """The episode wasn't found on the Indexer""" -class ShowDirNotFoundException(SickBeardException): +class ShowDirNotFoundException(SickGearException): """The show dir doesn't exist""" -class AuthException(SickBeardException): +class AuthException(SickGearException): """Your authentication information is incorrect""" -class EpisodeDeletedException(SickBeardException): +class EpisodeDeletedException(SickGearException): """This episode has been deleted""" -class CantRefreshException(SickBeardException): +class CantRefreshException(SickGearException): """The show can't be refreshed right now""" -class CantUpdateException(SickBeardException): +class CantUpdateException(SickGearException): """The show can't be updated right now""" -class CantSwitchException(SickBeardException): +class CantSwitchException(SickGearException): """The show can't be switched right now""" -class PostProcessingFailed(SickBeardException): +class PostProcessingFailed(SickGearException): """Post-processing the episode failed""" -class FailedProcessingFailed(SickBeardException): +class FailedProcessingFailed(SickGearException): """Post-processing the failed release failed""" -class FailedHistoryMultiSnatchException(SickBeardException): +class FailedHistoryMultiSnatchException(SickGearException): """Episode was snatched again before the first one was done""" -class FailedHistoryNotFoundException(SickBeardException): +class FailedHistoryNotFoundException(SickGearException): """The release was not found in the failed download history tracker""" -class EpisodeNotFoundByAbsoluteNumberException(SickBeardException): +class EpisodeNotFoundByAbsoluteNumberException(SickGearException): """The show wasn't found in the DB while looking at Absolute Numbers""" -class ConnectionSkipException(SickBeardException): +class ConnectionSkipException(SickGearException): """Connection was skipped because of previous errors""" diff --git a/lib/imdbpie/auth.py b/lib/imdbpie/auth.py index 3b292fb8..d6e802ce 100644 --- a/lib/imdbpie/auth.py +++ b/lib/imdbpie/auth.py @@ -265,9 +265,8 @@ class Auth(object): except ValueError as e: if not retry: cache.close() - import encodingKludge as ek import os - ek.ek(os.remove, ek.ek(os.path.join, self._cachedir, diskcache.core.DBNAME)) + os.remove(os.path.join(self._cachedir, diskcache.core.DBNAME)) return self._get_creds(retry=True) else: raise e diff --git a/lib/sg_helpers.py b/lib/sg_helpers.py index 67f2c771..e7ad08c1 100644 --- a/lib/sg_helpers.py +++ b/lib/sg_helpers.py @@ -32,7 +32,7 @@ from cfscrape import CloudflareScraper from send2trash import send2trash # noinspection PyPep8Naming -import encodingKludge as ek +from encodingKludge import SYS_ENCODING import requests from _23 import decode_bytes, filter_list, html_unescape, list_range, \ @@ -65,7 +65,7 @@ if False: html_convert_fractions = {0: '', 25: '¼', 50: '½', 75: '¾', 100: 1} -PROG_DIR = ek.ek(os.path.join, os.path.dirname(os.path.normpath(os.path.abspath(__file__))), '..') +PROG_DIR = os.path.join(os.path.dirname(os.path.normpath(os.path.abspath(__file__))), '..') # Mapping error status codes to official W3C names 
http_error_code = { @@ -676,9 +676,9 @@ def get_system_temp_dir(): try: uid = getpass.getuser() except ImportError: - return ek.ek(os.path.join, tempfile.gettempdir(), 'SickGear') + return os.path.join(tempfile.gettempdir(), 'SickGear') - return ek.ek(os.path.join, tempfile.gettempdir(), 'SickGear-%s' % uid) + return os.path.join(tempfile.gettempdir(), 'SickGear-%s' % uid) def proxy_setting(setting, request_url, force=False): @@ -834,7 +834,7 @@ def get_url(url, # type: AnyStr if not kwargs.pop('nocache', False): cache_dir = CACHE_DIR or get_system_temp_dir() - session = CacheControl(sess=session, cache=caches.FileCache(ek.ek(os.path.join, cache_dir, 'sessions'))) + session = CacheControl(sess=session, cache=caches.FileCache(os.path.join(cache_dir, 'sessions'))) provider = kwargs.pop('provider', None) @@ -1065,11 +1065,11 @@ def scantree(path, # type: AnyStr :param filter_kind: None to yield everything, True yields directories, False yields files :param recurse: Recursively scan the tree """ - if isinstance(path, string_types) and path and ek.ek(os.path.isdir, path): + if isinstance(path, string_types) and path and os.path.isdir(path): rc_exc, rc_inc = [re.compile(rx % '|'.join( [x for x in (param, ([param], [])[None is param])[not isinstance(param, list)]])) for rx, param in ((r'(?i)^(?:(?!%s).)*$', exclude), (r'(?i)%s', include))] - for entry in ek.ek(scandir, path): + for entry in scandir(path): is_dir = entry.is_dir(follow_symlinks=follow_symlinks) is_file = entry.is_file(follow_symlinks=follow_symlinks) no_filter = any([None is filter_kind, filter_kind and is_dir, not filter_kind and is_file]) @@ -1084,25 +1084,25 @@ def scantree(path, # type: AnyStr def copy_file(src_file, dest_file): if os.name.startswith('posix'): - ek.ek(subprocess.call, ['cp', src_file, dest_file]) + subprocess.call(['cp', src_file, dest_file]) else: - ek.ek(shutil.copyfile, src_file, dest_file) + shutil.copyfile(src_file, dest_file) try: - ek.ek(shutil.copymode, src_file, dest_file) + shutil.copymode(src_file, dest_file) except OSError: pass def move_file(src_file, dest_file, raise_exceptions=False): try: - ek.ek(shutil.move, src_file, dest_file) + shutil.move(src_file, dest_file) fix_set_group_id(dest_file) except OSError: copy_file(src_file, dest_file) - if ek.ek(os.path.exists, dest_file): + if os.path.exists(dest_file): fix_set_group_id(dest_file) - ek.ek(os.unlink, src_file) + os.unlink(src_file) elif raise_exceptions: raise OSError('Destination file could not be created: %s' % dest_file) @@ -1118,13 +1118,13 @@ def fix_set_group_id(child_path): if os.name in ('nt', 'ce'): return - parent_path = ek.ek(os.path.dirname, child_path) - parent_stat = ek.ek(os.stat, parent_path) + parent_path = os.path.dirname(child_path) + parent_stat = os.stat(parent_path) parent_mode = stat.S_IMODE(parent_stat[stat.ST_MODE]) if parent_mode & stat.S_ISGID: parent_gid = parent_stat[stat.ST_GID] - child_stat = ek.ek(os.stat, child_path) + child_stat = os.stat(child_path) child_gid = child_stat[stat.ST_GID] if child_gid == parent_gid: @@ -1138,7 +1138,7 @@ def fix_set_group_id(child_path): return try: - ek.ek(os.chown, child_path, -1, parent_gid) # only available on UNIX + os.chown(child_path, -1, parent_gid) # only available on UNIX logger.debug(u'Respecting the set-group-ID bit on the parent directory for %s' % child_path) except OSError: logger.error(u'Failed to respect the set-group-id bit on the parent directory for %s (setting group id %i)' @@ -1154,11 +1154,11 @@ def remove_file_perm(filepath, log_err=True): :param log_err: 
False to suppress log msgs :return True if filepath does not exist else None if no removal """ - if not ek.ek(os.path.exists, filepath): + if not os.path.exists(filepath): return True for t in list_range(10): # total seconds to wait 0 - 9 = 45s over 10 iterations try: - ek.ek(os.remove, filepath) + os.remove(filepath) except OSError as e: if getattr(e, 'winerror', 0) not in (5, 32): # 5=access denied (e.g. av), 32=another process has lock if log_err: @@ -1167,7 +1167,7 @@ def remove_file_perm(filepath, log_err=True): except (BaseException, Exception): pass time.sleep(t) - if not ek.ek(os.path.exists, filepath): + if not os.path.exists(filepath): return True if log_err: logger.warning('Unable to delete %s' % filepath) @@ -1195,11 +1195,11 @@ def remove_file(filepath, tree=False, prefix_failure='', log_level=logging.INFO) result = 'Deleted' if TRASH_REMOVE_SHOW: result = 'Trashed' - ek.ek(send2trash, filepath) + send2trash(filepath) elif tree: - ek.ek(shutil.rmtree, filepath) + shutil.rmtree(filepath) else: - ek.ek(os.remove, filepath) + os.remove(filepath) except OSError as e: if getattr(e, 'winerror', 0) not in (5, 32): # 5=access denied (e.g. av), 32=another process has lock logger.log(level=log_level, msg=u'%sUnable to %s %s %s: %s' % @@ -1207,10 +1207,10 @@ def remove_file(filepath, tree=False, prefix_failure='', log_level=logging.INFO) ('file', 'dir')[tree], filepath, ex(e))) break time.sleep(t) - if not ek.ek(os.path.exists, filepath): + if not os.path.exists(filepath): break - return (None, result)[filepath and not ek.ek(os.path.exists, filepath)] + return (None, result)[filepath and not os.path.exists(filepath)] def touch_file(name, atime=None, dir_name=None): @@ -1224,9 +1224,9 @@ def touch_file(name, atime=None, dir_name=None): :return: success """ if None is not dir_name: - name = ek.ek(os.path.join, dir_name, name) + name = os.path.join(dir_name, name) if make_path(dir_name): - if not ek.ek(os.path.exists, name): + if not os.path.exists(name): with io.open(name, 'w') as fh: fh.flush() if None is atime: @@ -1235,7 +1235,7 @@ def touch_file(name, atime=None, dir_name=None): if None is not atime: try: with open(name, 'a'): - ek.ek(os.utime, name, (atime, atime)) + os.utime(name, (atime, atime)) return True except (BaseException, Exception): logger.debug('File air date stamping not available on your OS') @@ -1253,12 +1253,12 @@ def make_path(name, syno=False): :param syno: whether to trigger a syno library update for path :return: success or dir exists """ - if not ek.ek(os.path.isdir, name): + if not os.path.isdir(name): # Windows, create all missing folders if os.name in ('nt', 'ce'): try: logger.debug(u'Path %s doesn\'t exist, creating it' % name) - ek.ek(os.makedirs, name) + os.makedirs(name) except (OSError, IOError) as e: logger.error(u'Failed creating %s : %s' % (name, ex(e))) return False @@ -1273,14 +1273,14 @@ def make_path(name, syno=False): sofar += cur_folder + os.path.sep # if it exists then just keep walking down the line - if ek.ek(os.path.isdir, sofar): + if os.path.isdir(sofar): continue try: logger.debug(u'Path %s doesn\'t exist, creating it' % sofar) - ek.ek(os.mkdir, sofar) + os.mkdir(sofar) # use normpath to remove end separator, otherwise checks permissions against itself - chmod_as_parent(ek.ek(os.path.normpath, sofar)) + chmod_as_parent(os.path.normpath(sofar)) if syno: # do the library update for synoindex NOTIFIERS.NotifierFactory().get('SYNOINDEX').addFolder(sofar) @@ -1302,19 +1302,19 @@ def chmod_as_parent(child_path): if os.name in ('nt', 'ce'): return 
- parent_path = ek.ek(os.path.dirname, child_path) + parent_path = os.path.dirname(child_path) if not parent_path: logger.debug(u'No parent path provided in %s, unable to get permissions from it' % child_path) return - parent_path_stat = ek.ek(os.stat, parent_path) + parent_path_stat = os.stat(parent_path) parent_mode = stat.S_IMODE(parent_path_stat[stat.ST_MODE]) - child_path_stat = ek.ek(os.stat, child_path) + child_path_stat = os.stat(child_path) child_path_mode = stat.S_IMODE(child_path_stat[stat.ST_MODE]) - if ek.ek(os.path.isfile, child_path): + if os.path.isfile(child_path): child_mode = file_bit_filter(parent_mode) else: child_mode = parent_mode @@ -1330,7 +1330,7 @@ def chmod_as_parent(child_path): return try: - ek.ek(os.chmod, child_path, child_mode) + os.chmod(child_path, child_mode) logger.debug(u'Setting permissions for %s to %o as parent directory has %o' % (child_path, child_mode, parent_mode)) except OSError: @@ -1366,17 +1366,17 @@ def write_file(filepath, # type: AnyStr """ result = False - if make_path(ek.ek(os.path.dirname, filepath)): + if make_path(os.path.dirname(filepath)): try: if raw: empty_file = True - with ek.ek(io.FileIO, filepath, 'wb') as fh: + with io.FileIO(filepath, 'wb') as fh: for chunk in data.iter_content(chunk_size=1024): if chunk: empty_file = False fh.write(chunk) fh.flush() - ek.ek(os.fsync, fh.fileno()) + os.fsync(fh.fileno()) if empty_file: remove_file_perm(filepath, log_err=False) return result @@ -1384,11 +1384,11 @@ def write_file(filepath, # type: AnyStr w_mode = 'w' if utf8: w_mode = 'a' - with ek.ek(io.FileIO, filepath, 'wb') as fh: + with io.FileIO(filepath, 'wb') as fh: fh.write(codecs.BOM_UTF8) if xmltree: - with ek.ek(io.FileIO, filepath, w_mode) as fh: + with io.FileIO(filepath, w_mode) as fh: params = {} if utf8: params = dict(encoding='utf-8') @@ -1397,10 +1397,10 @@ def write_file(filepath, # type: AnyStr data.write(fh, **params) else: if isinstance(data, text_type): - with ek.ek(io.open, filepath, w_mode, encoding='utf-8') as fh: + with io.open(filepath, w_mode, encoding='utf-8') as fh: fh.write(data) else: - with ek.ek(io.FileIO, filepath, w_mode) as fh: + with io.FileIO(filepath, w_mode) as fh: fh.write(data) chmod_as_parent(filepath) @@ -1451,7 +1451,7 @@ def replace_extension(filename, new_ext): def long_path(path): # type: (AnyStr) -> AnyStr """add long path prefix for Windows""" - if 'nt' == os.name and 260 < len(path) and not path.startswith('\\\\?\\') and ek.ek(os.path.isabs, path): + if 'nt' == os.name and 260 < len(path) and not path.startswith('\\\\?\\') and os.path.isabs(path): return '\\\\?\\' + path return path @@ -1504,8 +1504,7 @@ def cmdline_runner(cmd, shell=False, suppress_stderr=False, env=None): if isinstance(env, dict): kw.update(env=dict(os.environ, **env)) - if not PY2: - kw.update(dict(encoding=ek.SYS_ENCODING, text=True, bufsize=0)) + kw.update(dict(encoding=SYS_ENCODING, text=True, bufsize=0)) if 'win32' == sys.platform: kw['creationflags'] = 0x08000000 # CREATE_NO_WINDOW (needed for py2exe) diff --git a/lib/subliminal/videos.py b/lib/subliminal/videos.py index 84a8fa11..e83fd7c3 100644 --- a/lib/subliminal/videos.py +++ b/lib/subliminal/videos.py @@ -29,9 +29,6 @@ import struct from six import PY2, text_type from _23 import decode_str -# noinspection PyPep8Naming -import encodingKludge as ek - __all__ = ['EXTENSIONS', 'MIMETYPES', 'Video', 'Episode', 'Movie', 'UnknownVideo', 'scan', 'hash_opensubtitles', 'hash_thesubdb'] @@ -62,10 +59,10 @@ class Video(object): self._path = None self.hashes = {} 
self.subtitle_path = subtitle_path - + if PY2 and isinstance(path, text_type): path = path.encode('utf-8') - + if os.path.exists(path): self._path = path self.size = os.path.getsize(self._path) @@ -150,8 +147,8 @@ class Video(object): folder = '.' existing = [f for f in os.listdir(folder) if f.startswith(basename)] if self.subtitle_path: - subsDir = ek.ek(os.path.join, folder, self.subtitle_path) - if ek.ek(os.path.isdir, subsDir): + subsDir = os.path.join(folder, self.subtitle_path) + if os.path.isdir(subsDir): existing.extend([f for f in os.listdir(subsDir) if f.startswith(basename)]) for path in existing: for ext in subtitles.EXTENSIONS: @@ -232,7 +229,7 @@ def scan(entry, max_depth=3, scan_filter=None, depth=0): """ if PY2 and isinstance(entry, text_type): entry = entry.encode('utf-8') - + if depth > max_depth != 0: # we do not want to search the whole file system except if max_depth = 0 return [] if os.path.isdir(entry): # a dir? recurse diff --git a/sickgear.py b/sickgear.py index d8caf88f..d5d5e85d 100755 --- a/sickgear.py +++ b/sickgear.py @@ -90,7 +90,7 @@ from multiprocessing import freeze_support from configobj import ConfigObj # noinspection PyPep8Naming -from encodingKludge import EXIT_BAD_ENCODING, SYS_ENCODING +from encodingKludge import SYS_ENCODING from exceptions_helper import ex import sickgear from sickgear import db, logger, name_cache, network_timezones @@ -200,10 +200,6 @@ class SickGear(object): sickgear.PROG_DIR = os.path.dirname(sickgear.MY_FULLNAME) sickgear.DATA_DIR = sickgear.PROG_DIR sickgear.MY_ARGS = sys.argv[1:] - if EXIT_BAD_ENCODING: - print('Sorry, you MUST add the SickGear folder to the PYTHONPATH environment variable') - print('or find another way to force Python to use %s for string encoding.' % SYS_ENCODING) - sys.exit(1) sickgear.SYS_ENCODING = SYS_ENCODING legacy_runner = globals().get('_legacy_sickgear_runner') if not legacy_runner: diff --git a/sickgear/__init__.py b/sickgear/__init__.py index c8e3ff06..a0a0ed49 100644 --- a/sickgear/__init__.py +++ b/sickgear/__init__.py @@ -34,8 +34,6 @@ import threading import uuid import zlib -# noinspection PyPep8Naming -import encodingKludge as ek from . import classes, db, helpers, image_cache, indexermapper, logger, metadata, naming, people_queue, providers, \ scene_exceptions, scene_numbering, scheduler, search_backlog, search_propers, search_queue, search_recent, \ show_queue, show_updater, subtitles, trakt_helpers, traktChecker, version_checker, watchedstate_queue @@ -513,7 +511,7 @@ ANIDB_USE_MYLIST = False ADBA_CONNECTION = None # type: Connection ANIME_TREAT_AS_HDTV = False -GUI_NAME = None +GUI_NAME = '' DEFAULT_HOME = None FANART_LIMIT = None FANART_PANEL = None @@ -811,8 +809,8 @@ def init_stage_1(console_logging): # clean cache folders if CACHE_DIR: helpers.clear_cache() - ZONEINFO_DIR = ek.ek(os.path.join, CACHE_DIR, 'zoneinfo') - if not ek.ek(os.path.isdir, ZONEINFO_DIR) and not helpers.make_path(ZONEINFO_DIR): + ZONEINFO_DIR = os.path.join(CACHE_DIR, 'zoneinfo') + if not os.path.isdir(ZONEINFO_DIR) and not helpers.make_path(ZONEINFO_DIR): logger.log(u'!!! 
Creating local zoneinfo dir failed', logger.ERROR) sg_helpers.CACHE_DIR = CACHE_DIR sg_helpers.DATA_DIR = DATA_DIR @@ -1054,8 +1052,8 @@ def init_stage_1(console_logging): NZBGET_SKIP_PM = bool(check_setting_int(CFG, 'NZBGet', 'nzbget_skip_process_media', 0)) try: - ng_script_file = ek.ek(os.path.join, ek.ek(os.path.dirname, ek.ek(os.path.dirname, __file__)), - 'autoProcessTV', 'SickGear-NG', 'SickGear-NG.py') + ng_script_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), + 'autoProcessTV', 'SickGear-NG', 'SickGear-NG.py') with io.open(ng_script_file, 'r', encoding='utf8') as ng: text = ng.read() NZBGET_SCRIPT_VERSION = re.search(r""".*version: (\d+\.\d+)""", text, flags=re.M).group(1) @@ -1615,7 +1613,7 @@ def init_stage_2(): cycleTime=datetime.timedelta(hours=1), start_time=datetime.time(hour=SHOW_UPDATE_HOUR), threadName='SHOWUPDATER', - prevent_cycle_run=show_queue_scheduler.action.isShowUpdateRunning) # 3AM + prevent_cycle_run=show_queue_scheduler.action.is_show_update_running) # 3AM people_queue_scheduler = scheduler.Scheduler( people_queue.PeopleQueue(), @@ -1718,9 +1716,9 @@ def init_stage_2(): MEMCACHE['history_tab'] = History.menu_tab(MEMCACHE['history_tab_limit']) try: - for f in ek.ek(scandir, ek.ek(os.path.join, PROG_DIR, 'gui', GUI_NAME, 'images', 'flags')): + for f in scandir(os.path.join(PROG_DIR, 'gui', GUI_NAME, 'images', 'flags')): if f.is_file(): - MEMCACHE_FLAG_IMAGES[ek.ek(os.path.splitext, f.name)[0].lower()] = True + MEMCACHE_FLAG_IMAGES[os.path.splitext(f.name)[0].lower()] = True except (BaseException, Exception): pass diff --git a/sickgear/anime.py b/sickgear/anime.py index 48347021..47eeb48d 100644 --- a/sickgear/anime.py +++ b/sickgear/anime.py @@ -19,8 +19,6 @@ import os import adba from adba.aniDBresponses import LoginFirstResponse -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex import sickgear @@ -182,7 +180,7 @@ def short_group_names(groups): def anidb_cache_dir(): # type: (...) -> Optional[AnyStr] - cache_dir = ek.ek(os.path.join, sickgear.CACHE_DIR or get_system_temp_dir(), 'anidb') + cache_dir = os.path.join(sickgear.CACHE_DIR or get_system_temp_dir(), 'anidb') if not make_path(cache_dir): cache_dir = None return cache_dir diff --git a/sickgear/auto_post_processer.py b/sickgear/auto_post_processer.py index 5e9f265e..dfa97031 100644 --- a/sickgear/auto_post_processer.py +++ b/sickgear/auto_post_processer.py @@ -16,9 +16,6 @@ import os.path -# noinspection PyPep8Naming -import encodingKludge as ek - import sickgear from . import logger, processTV @@ -40,12 +37,12 @@ class PostProcesser(object): @staticmethod def _main(): - if not ek.ek(os.path.isdir, sickgear.TV_DOWNLOAD_DIR): + if not os.path.isdir(sickgear.TV_DOWNLOAD_DIR): logger.log(u"Automatic post-processing attempted but dir %s doesn't exist" % sickgear.TV_DOWNLOAD_DIR, logger.ERROR) return - if not ek.ek(os.path.isabs, sickgear.TV_DOWNLOAD_DIR): + if not os.path.isabs(sickgear.TV_DOWNLOAD_DIR): logger.log(u'Automatic post-processing attempted but dir %s is relative ' '(and probably not what you really want to process)' % sickgear.TV_DOWNLOAD_DIR, logger.ERROR) return diff --git a/sickgear/browser.py b/sickgear/browser.py index 1dc1a60b..1c62b9e0 100644 --- a/sickgear/browser.py +++ b/sickgear/browser.py @@ -17,8 +17,6 @@ import os import string -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex from . 
import logger @@ -31,7 +29,7 @@ if 'nt' == os.name: # adapted from # http://stackoverflow.com/questions/827371/is-there-a-way-to-list-all-the-available-drive-letters-in-python/827490 -def getWinDrives(): +def get_win_drives(): """ Return list of detected drives """ assert 'nt' == os.name @@ -45,15 +43,6 @@ def getWinDrives(): return drives -def foldersAtPath(path, include_parent=False, include_files=False, **kwargs): - """ deprecated_item, remove in 2020 """ - """ prevent issues with requests using legacy params """ - include_parent = include_parent or kwargs.get('includeParent') or False - include_files = include_files or kwargs.get('includeFiles') or False - """ /legacy """ - return folders_at_path(path, include_parent, include_files) - - def folders_at_path(path, include_parent=False, include_files=False): """ Returns a list of dictionaries with the folders contained at the given path Give the empty string as the path to list the contents of the root path @@ -61,17 +50,17 @@ def folders_at_path(path, include_parent=False, include_files=False): """ # walk up the tree until we find a valid path - while path and not ek.ek(os.path.isdir, path): - if path == ek.ek(os.path.dirname, path): + while path and not os.path.isdir(path): + if path == os.path.dirname(path): path = '' break else: - path = ek.ek(os.path.dirname, path) + path = os.path.dirname(path) if '' == path: if 'nt' == os.name: entries = [{'currentPath': r'\My Computer'}] - for letter in getWinDrives(): + for letter in get_win_drives(): letter_path = '%s:\\' % letter entries.append({'name': letter_path, 'path': letter_path}) return entries @@ -79,8 +68,8 @@ def folders_at_path(path, include_parent=False, include_files=False): path = '/' # fix up the path and find the parent - path = ek.ek(os.path.abspath, ek.ek(os.path.normpath, path)) - parent_path = ek.ek(os.path.dirname, path) + path = os.path.abspath(os.path.normpath(path)) + parent_path = os.path.dirname(path) # if we're at the root then the next step is the meta-node showing our drive letters if 'nt' == os.name and path == parent_path: @@ -92,7 +81,7 @@ def folders_at_path(path, include_parent=False, include_files=False): logger.log('Unable to open %s: %r / %s' % (path, e, ex(e)), logger.WARNING) file_list = get_file_list(parent_path, include_files) - file_list = sorted(file_list, key=lambda x: ek.ek(os.path.basename, x['name']).lower()) + file_list = sorted(file_list, key=lambda x: os.path.basename(x['name']).lower()) entries = [{'currentPath': path}] if include_parent and path != parent_path: diff --git a/sickgear/common.py b/sickgear/common.py index ac7c6b45..804fee6b 100644 --- a/sickgear/common.py +++ b/sickgear/common.py @@ -240,9 +240,7 @@ class Quality(object): :rtype: int """ - # noinspection PyPep8Naming - import encodingKludge as ek - name = ek.ek(os.path.basename, name) + name = os.path.basename(name) # if we have our exact text then assume we put it there for _x in sorted(iterkeys(Quality.qualityStrings), reverse=True): @@ -268,10 +266,8 @@ class Quality(object): :return: :rtype: int """ - # noinspection PyPep8Naming - import encodingKludge as ek from sickgear import logger - name = ek.ek(os.path.basename, name) + name = os.path.basename(name) name_has = (lambda quality_list, func=all: func([re.search(q, name, re.I) for q in quality_list])) @@ -359,11 +355,9 @@ class Quality(object): :return: :rtype: int """ - # noinspection PyPep8Naming - import encodingKludge as ek from exceptions_helper import ex from sickgear import logger - if ek.ek(os.path.isfile, 
filename): + if os.path.isfile(filename): from hachoir.parser import createParser from hachoir.metadata import extractMetadata @@ -372,7 +366,7 @@ class Quality(object): parser = height = None msg = 'Hachoir can\'t parse file "%s" content quality because it found error: %s' try: - parser = ek.ek(createParser, filename) + parser = createParser(filename) except InputStreamError as e: logger.log(msg % (filename, ex(e)), logger.WARNING) except (BaseException, Exception) as e: diff --git a/sickgear/config.py b/sickgear/config.py index 759bc0a5..c98df792 100644 --- a/sickgear/config.py +++ b/sickgear/config.py @@ -18,9 +18,6 @@ import datetime import os.path import re -# noinspection PyPep8Naming -import encodingKludge as ek - import sickgear import sickgear.providers from . import db, helpers, logger, naming @@ -360,7 +357,7 @@ def clean_url(url, add_slash=True): scheme, netloc, path, query, fragment = urlsplit(url, 'http') if not path.endswith('/'): - basename, ext = ek.ek(os.path.splitext, ek.ek(os.path.basename, path)) + basename, ext = os.path.splitext(os.path.basename(path)) if not ext and add_slash: path += '/' @@ -857,14 +854,14 @@ class ConfigMigrator(object): # Migration v16: Purge old cache image folder name @staticmethod def _migrate_v16(): - if sickgear.CACHE_DIR and ek.ek(os.path.isdir, sickgear.CACHE_DIR): + if sickgear.CACHE_DIR and os.path.isdir(sickgear.CACHE_DIR): cache_default = sickgear.CACHE_DIR dead_paths = ['anidb', 'imdb', 'trakt'] for path in dead_paths: sickgear.CACHE_DIR = '%s/images/%s' % (cache_default, path) helpers.clear_cache(True) try: - ek.ek(os.rmdir, sickgear.CACHE_DIR) + os.rmdir(sickgear.CACHE_DIR) except OSError: pass sickgear.CACHE_DIR = cache_default diff --git a/sickgear/databases/mainDB.py b/sickgear/databases/mainDB.py index 8c50d3ba..be2edcf1 100644 --- a/sickgear/databases/mainDB.py +++ b/sickgear/databases/mainDB.py @@ -21,8 +21,6 @@ import re from .. import db, common, logger from ..name_parser.parser import NameParser, InvalidNameException, InvalidShowException import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek from six import iteritems @@ -432,8 +430,8 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): # if there is no size yet then populate it for us if (not cur_result['file_size'] or not int(cur_result['file_size'])) \ - and ek.ek(os.path.isfile, cur_result['location']): - cur_size = ek.ek(os.path.getsize, cur_result['location']) + and os.path.isfile(cur_result['location']): + cur_size = os.path.getsize(cur_result['location']) self.connection.action('UPDATE tv_episodes SET file_size = ? WHERE episode_id = ?', [cur_size, int(cur_result['episode_id'])]) @@ -456,7 +454,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): continue nzb_name = cur_result['resource'] - file_name = ek.ek(os.path.basename, download_sql_result[0]['resource']) + file_name = os.path.basename(download_sql_result[0]['resource']) # take the extension off the filename, it's not needed if '.' 
in file_name: @@ -508,7 +506,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): self.upgrade_log(u'Adding release name to all episodes with obvious scene filenames') for cur_result in empty_sql_result: - ep_file_name = ek.ek(os.path.basename, cur_result['location']) + ep_file_name = os.path.basename(cur_result['location']) ep_file_name = os.path.splitext(ep_file_name)[0] # only want to find real scene names here so anything with a space in it is out @@ -1999,7 +1997,7 @@ class ChangeTmdbID(db.SchemaUpgrade): self.upgrade_log('Renaming tmdb images') # noinspection PyProtectedMember for _dir in (ImageCache._persons_dir(), ImageCache._characters_dir()): - for _f in ek.ek(scantree, _dir): # type: DirEntry + for _f in scantree(_dir): # type: DirEntry if not _f.is_file(follow_symlinks=False): continue try: @@ -2010,7 +2008,7 @@ class ChangeTmdbID(db.SchemaUpgrade): continue try: move_file(_f.path, - ek.ek(os.path.join, ek.ek(os.path.dirname, _f.path), + os.path.join(os.path.dirname(_f.path), re.sub('^%s-' % img_src, '%s-' % cache_img_src[(img_src, TVINFO_TMDB)[TVINFO_TMDB_OLD == img_src]], _f.name))) except (BaseException, Exception): diff --git a/sickgear/db.py b/sickgear/db.py index 5cfc0fc5..b9ee5a4e 100644 --- a/sickgear/db.py +++ b/sickgear/db.py @@ -24,8 +24,6 @@ import sqlite3 import threading import time -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex import sickgear @@ -60,7 +58,7 @@ def dbFilename(filename='sickbeard.db', suffix=None): """ if suffix: filename = '%s.%s' % (filename, suffix) - return ek.ek(os.path.join, sickgear.DATA_DIR, filename) + return os.path.join(sickgear.DATA_DIR, filename) def mass_upsert_sql(table_name, value_dict, key_dict, sanitise=True): @@ -136,12 +134,12 @@ class DBConnection(object): logger.log('this python sqlite3 version doesn\'t support backups', logger.DEBUG) return False, 'this python sqlite3 version doesn\'t support backups' - if not ek.ek(os.path.isdir, target): + if not os.path.isdir(target): logger.log('Backup target invalid', logger.ERROR) return False, 'Backup target invalid' - target_db = ek.ek(os.path.join, target, (backup_filename, self.filename)[None is backup_filename]) - if ek.ek(os.path.exists, target_db): + target_db = os.path.join(target, (backup_filename, self.filename)[None is backup_filename]) + if os.path.exists(target_db): logger.log('Backup target file already exists', logger.ERROR) return False, 'Backup target file already exists' @@ -758,14 +756,14 @@ def MigrationCode(my_db): def cleanup_old_db_backups(filename): try: - d, filename = ek.ek(os.path.split, filename) + d, filename = os.path.split(filename) if not d: d = sickgear.DATA_DIR for f in filter_iter(lambda fn: fn.is_file() and filename in fn.name and re.search(r'\.db(\.v\d+)?\.r\d+$', fn.name), - ek.ek(scandir, d)): + scandir(d)): try: - ek.ek(os.unlink, f.path) + os.unlink(f.path) except (BaseException, Exception): pass except (BaseException, Exception): @@ -870,7 +868,7 @@ def backup_all_dbs(target, compress=True, prefer_7z=True): if not success: return False, msg if compress: - full_path = ek.ek(os.path.join, target, name) + full_path = os.path.join(target, name) if not compress_file(full_path, '%s.db' % cur_db, prefer_7z=prefer_7z): return False, 'Failure to compress backup' delete_old_db_backups(target) diff --git a/sickgear/failedProcessor.py b/sickgear/failedProcessor.py index 3ebf1844..b1c7b4d8 100644 --- a/sickgear/failedProcessor.py +++ b/sickgear/failedProcessor.py @@ -69,7 +69,7 @@ class 
FailedProcessor(LegacyFailedProcessor): """ self._log(u'Failed download detected: (%s, %s)' % (self.nzb_name, self.dir_name)) - releaseName = show_name_helpers.determineReleaseName(self.dir_name, self.nzb_name) + releaseName = show_name_helpers.determine_release_name(self.dir_name, self.nzb_name) if None is releaseName: self._log(u'Warning: unable to find a valid release name.', logger.WARNING) raise exceptions_helper.FailedProcessingFailed() diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 2baa137e..58dd3562 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -36,8 +36,6 @@ from .common import cpu_presets, mediaExtensions, Overview, Quality, statusStrin ARCHIVED, DOWNLOADED, FAILED, IGNORED, SKIPPED, SNATCHED_ANY, SUBTITLED, UNAIRED, UNKNOWN, WANTED from .sgdatetime import timestamp_near from lib.tvinfo_base.exceptions import * -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex, MultipleShowObjectsException import dateutil.parser @@ -171,7 +169,7 @@ def has_image_ext(filename): :rtype: bool """ try: - if ek.ek(os.path.splitext, filename)[1].lower() in ['.bmp', '.gif', '.jpeg', '.jpg', '.png', '.webp']: + if os.path.splitext(filename)[1].lower() in ['.bmp', '.gif', '.jpeg', '.jpg', '.png', '.webp']: return True except (BaseException, Exception): pass @@ -251,9 +249,9 @@ def make_dir(path): :return: success of creation :rtype: bool """ - if not ek.ek(os.path.isdir, path): + if not os.path.isdir(path): try: - ek.ek(os.makedirs, path) + os.makedirs(path) # do a Synology library update notifiers.NotifierFactory().get('SYNOINDEX').addFolder(path) except OSError: @@ -391,7 +389,7 @@ def link(src_file, dest_file): if 0 == ctypes.windll.kernel32.CreateHardLinkW(text_type(dest_file), text_type(src_file), 0): raise ctypes.WinError() else: - ek.ek(os.link, src_file, dest_file) + os.link(src_file, dest_file) def hardlink_file(src_file, dest_file): @@ -403,7 +401,7 @@ def hardlink_file(src_file, dest_file): :type dest_file: AnyStr """ try: - ek.ek(link, src_file, dest_file) + link(src_file, dest_file) fix_set_group_id(dest_file) except (BaseException, Exception) as e: logger.log(u"Failed to create hardlink of %s at %s: %s. Copying instead." % (src_file, dest_file, ex(e)), @@ -423,10 +421,10 @@ def symlink(src_file, dest_file): import ctypes if ctypes.windll.kernel32.CreateSymbolicLinkW( - text_type(dest_file), text_type(src_file), 1 if ek.ek(os.path.isdir, src_file) else 0) in [0, 1280]: + text_type(dest_file), text_type(src_file), 1 if os.path.isdir(src_file) else 0) in [0, 1280]: raise ctypes.WinError() else: - ek.ek(os.symlink, src_file, dest_file) + os.symlink(src_file, dest_file) def move_and_symlink_file(src_file, dest_file): @@ -438,9 +436,9 @@ def move_and_symlink_file(src_file, dest_file): :type dest_file: AnyStr """ try: - ek.ek(shutil.move, src_file, dest_file) + shutil.move(src_file, dest_file) fix_set_group_id(dest_file) - ek.ek(symlink, dest_file, src_file) + symlink(dest_file, src_file) except (BaseException, Exception): logger.log(u"Failed to create symlink of %s at %s. 
Copying instead" % (src_file, dest_file), logger.ERROR) copy_file(src_file, dest_file) @@ -461,11 +459,11 @@ def rename_ep_file(cur_path, new_path, old_path_length=0): :rtype: bool """ - # new_dest_dir, new_dest_name = ek.ek(os.path.split, new_path) + # new_dest_dir, new_dest_name = os.path.split(new_path) if 0 == old_path_length or len(cur_path) < old_path_length: # approach from the right - cur_file_name, cur_file_ext = ek.ek(os.path.splitext, cur_path) + cur_file_name, cur_file_ext = os.path.splitext(cur_path) else: # approach from the left cur_file_ext = cur_path[old_path_length:] @@ -473,7 +471,7 @@ def rename_ep_file(cur_path, new_path, old_path_length=0): if cur_file_ext[1:] in subtitleExtensions: # Extract subtitle language from filename - sublang = ek.ek(os.path.splitext, cur_file_name)[1][1:] + sublang = os.path.splitext(cur_file_name)[1][1:] # Check if the language extracted from filename is a valid language try: @@ -485,18 +483,18 @@ def rename_ep_file(cur_path, new_path, old_path_length=0): # put the extension on the incoming file new_path += cur_file_ext - make_path(ek.ek(os.path.dirname, new_path), syno=True) + make_path(os.path.dirname(new_path), syno=True) # move the file try: logger.log(u'Renaming file from %s to %s' % (cur_path, new_path)) - ek.ek(shutil.move, cur_path, new_path) + shutil.move(cur_path, new_path) except (OSError, IOError) as e: logger.log(u"Failed renaming " + cur_path + " to " + new_path + ": " + ex(e), logger.ERROR) return False # clean up any old folders that are empty - delete_empty_folders(ek.ek(os.path.dirname, cur_path)) + delete_empty_folders(os.path.dirname(cur_path)) return True @@ -517,8 +515,8 @@ def delete_empty_folders(check_empty_dir, keep_dir=None): logger.log(u"Trying to clean any empty folders under " + check_empty_dir) # as long as the folder exists and doesn't contain any files, delete it - while ek.ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir: - check_files = ek.ek(os.listdir, check_empty_dir) + while os.path.isdir(check_empty_dir) and check_empty_dir != keep_dir: + check_files = os.listdir(check_empty_dir) if not check_files or (len(check_files) <= len(ignore_items) and all( [check_file in ignore_items for check_file in check_files])): @@ -526,13 +524,13 @@ def delete_empty_folders(check_empty_dir, keep_dir=None): try: logger.log(u"Deleting empty folder: " + check_empty_dir) # need shutil.rmtree when ignore_items is really implemented - ek.ek(os.rmdir, check_empty_dir) + os.rmdir(check_empty_dir) # do a Synology library update notifiers.NotifierFactory().get('SYNOINDEX').deleteFolder(check_empty_dir) except OSError as e: logger.log(u"Unable to delete " + check_empty_dir + ": " + repr(e) + " / " + ex(e), logger.WARNING) break - check_empty_dir = ek.ek(os.path.dirname, check_empty_dir) + check_empty_dir = os.path.dirname(check_empty_dir) else: break @@ -565,7 +563,7 @@ def get_absolute_number_from_season_and_episode(show_obj, season, episode): logger.DEBUG) else: logger.debug('No entries for absolute number in show: %s found using %sx%s' % - (show_obj.unique_name, str(season), str(episode))) + (show_obj.unique_name, str(season), str(episode))) return absolute_number @@ -608,7 +606,7 @@ def sanitize_scene_name(name): # tidy up stuff that doesn't belong in scene names name = re.sub(r'(-?\s|/)', '.', name).replace('&', 'and') - name = re.sub(r"\.\.*", '.', name).rstrip('.') + name = re.sub(r"\.+", '.', name).rstrip('.') return name return '' @@ -675,24 +673,24 @@ def backup_versioned_file(old_file, version): 
new_file = '%s.v%s' % (old_file, version) - if ek.ek(os.path.isfile, new_file): + if os.path.isfile(new_file): changed_old_db = False for back_nr in range(1, 10000): alt_name = '%s.r%s' % (new_file, back_nr) - if not ek.ek(os.path.isfile, alt_name): + if not os.path.isfile(alt_name): try: shutil.move(new_file, alt_name) changed_old_db = True break except (BaseException, Exception): - if ek.ek(os.path.isfile, new_file): + if os.path.isfile(new_file): continue logger.log('could not rename old backup db file', logger.WARNING) if not changed_old_db: raise Exception('can\'t create a backup of db') - while not ek.ek(os.path.isfile, new_file): - if not ek.ek(os.path.isfile, old_file) or 0 == get_size(old_file): + while not os.path.isfile(new_file): + if not os.path.isfile(old_file) or 0 == get_size(old_file): logger.log(u'No need to create backup', logger.DEBUG) break @@ -724,12 +722,12 @@ def restore_versioned_file(backup_file, version): :return: success :rtype: bool """ - numTries = 0 + num_tries = 0 - new_file, backup_version = ek.ek(os.path.splitext, backup_file) + new_file, backup_version = os.path.splitext(backup_file) restore_file = new_file + '.' + 'v' + str(version) - if not ek.ek(os.path.isfile, new_file): + if not os.path.isfile(new_file): logger.log(u"Not restoring, " + new_file + " doesn't exist", logger.DEBUG) return False @@ -744,8 +742,8 @@ def restore_versioned_file(backup_file, version): logger.WARNING) return False - while not ek.ek(os.path.isfile, new_file): - if not ek.ek(os.path.isfile, restore_file): + while not os.path.isfile(new_file): + if not os.path.isfile(restore_file): logger.log(u"Not restoring, " + restore_file + " doesn't exist", logger.DEBUG) break @@ -756,11 +754,11 @@ def restore_versioned_file(backup_file, version): break except (BaseException, Exception) as e: logger.log(u"Error while trying to restore " + restore_file + ": " + ex(e), logger.WARNING) - numTries += 1 + num_tries += 1 time.sleep(1) logger.log(u"Trying again.", logger.DEBUG) - if 10 <= numTries: + if 10 <= num_tries: logger.log(u"Unable to restore " + restore_file + " to " + new_file + " please do it manually.", logger.ERROR) return False @@ -978,8 +976,8 @@ def is_hidden_folder(folder): :return: Returns True if folder is hidden :rtype: bool """ - if ek.ek(os.path.isdir, folder): - if ek.ek(os.path.basename, folder).startswith('.'): + if os.path.isdir(folder): + if os.path.basename(folder).startswith('.'): return True return False @@ -994,7 +992,7 @@ def real_path(path): :return: the canonicalized absolute pathname :rtype: AnyStr """ - return ek.ek(os.path.normpath, ek.ek(os.path.normcase, ek.ek(os.path.realpath, ek.ek(os.path.expanduser, path)))) + return os.path.normpath(os.path.normcase(os.path.realpath(os.path.expanduser(path)))) def validate_show(show_obj, season=None, episode=None): @@ -1048,7 +1046,7 @@ def clear_cache(force=False): elif direntry.is_dir(**direntry_args) and direntry.name not in ['cheetah', 'sessions', 'indexers']: dirty = dirty or False try: - ek.ek(os.rmdir, direntry.path) + os.rmdir(direntry.path) except OSError: dirty = True @@ -1098,8 +1096,8 @@ def get_size(start_path='.'): :return: size in bytes :rtype: int or long """ - if ek.ek(os.path.isfile, start_path): - return ek.ek(os.path.getsize, start_path) + if os.path.isfile(start_path): + return os.path.getsize(start_path) try: return sum(map((lambda x: x.stat(follow_symlinks=False).st_size), scantree(start_path))) except OSError: @@ -1115,14 +1113,14 @@ def get_media_stats(start_path='.'): :param start_path: path to 
scan """ - if ek.ek(os.path.isdir, start_path): + if os.path.isdir(start_path): sizes = sorted(map(lambda y: y.stat(follow_symlinks=False).st_size, filter(lambda x: has_media_ext(x.name), scantree(start_path)))) if sizes: return len(sizes), sizes[0], sizes[-1], int(sum(sizes) / len(sizes)) - elif ek.ek(os.path.isfile, start_path): - size = ek.ek(os.path.getsize, start_path) + elif os.path.isfile(start_path): + size = os.path.getsize(start_path) return 1, size, size, size return 0, 0, 0, 0 @@ -1137,7 +1135,7 @@ def remove_article(text=''): :return: text without articles :rtype: AnyStr """ - return re.sub(r'(?i)^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', text) + return re.sub(r'(?i)^(?:A(?!\s+to)n?|The)\s(\w)', r'\1', text) def re_valid_hostname(with_allowed=True): @@ -1332,11 +1330,11 @@ def cleanup_cache(): Delete old cached files """ delete_not_changed_in( - [ek.ek(os.path.join, sickgear.CACHE_DIR, 'images', 'browse', 'thumb', x) + [os.path.join(sickgear.CACHE_DIR, 'images', 'browse', 'thumb', x) for x in ['anidb', 'imdb', 'trakt', 'tvdb']] + - [ek.ek(os.path.join, sickgear.CACHE_DIR, 'images', x) + [os.path.join(sickgear.CACHE_DIR, 'images', x) for x in ['characters', 'person']] + - [ek.ek(os.path.join, sickgear.CACHE_DIR, 'tvinfo_cache')]) + [os.path.join(sickgear.CACHE_DIR, 'tvinfo_cache')]) def delete_not_changed_in(paths, days=30, minutes=0): @@ -1377,8 +1375,8 @@ def set_file_timestamp(filename, min_age=3, new_time=None): """ min_time = int(timestamp_near((datetime.datetime.now() - datetime.timedelta(days=min_age)))) try: - if ek.ek(os.path.isfile, filename) and ek.ek(os.path.getmtime, filename) < min_time: - ek.ek(os.utime, filename, new_time) + if os.path.isfile(filename) and os.path.getmtime(filename) < min_time: + os.utime(filename, new_time) except (BaseException, Exception): pass @@ -1407,7 +1405,7 @@ def is_link(filepath): :return: True or False """ if 'win32' == sys.platform: - if not ek.ek(os.path.exists, filepath): + if not os.path.exists(filepath): return False import ctypes @@ -1417,7 +1415,7 @@ def is_link(filepath): attr = ctypes.windll.kernel32.GetFileAttributesW(text_type(filepath)) return invalid_file_attributes != attr and 0 != attr & file_attribute_reparse_point - return ek.ek(os.path.islink, filepath) + return os.path.islink(filepath) def df(): @@ -1496,11 +1494,11 @@ def path_mapper(search, replace, subject): :rtype: Tuple[AnyStr, bool] """ delim = '/!~!/' - search = re.sub(r'[\\]', delim, search) - replace = re.sub(r'[\\]', delim, replace) - path = re.sub(r'[\\]', delim, subject) + search = re.sub(r'\\', delim, search) + replace = re.sub(r'\\', delim, replace) + path = re.sub(r'\\', delim, subject) result = re.sub('(?i)^%s' % search, replace, path) - result = ek.ek(os.path.normpath, re.sub(delim, '/', result)) + result = os.path.normpath(re.sub(delim, '/', result)) return result, result != subject @@ -1559,7 +1557,7 @@ def generate_show_dir_name(root_dir, show_name): san_show_name = san_show_name.replace(' ', '.') if None is root_dir: return san_show_name - return ek.ek(os.path.join, root_dir, san_show_name) + return os.path.join(root_dir, san_show_name) def count_files_dirs(base_dir): @@ -1572,7 +1570,7 @@ def count_files_dirs(base_dir): """ f = d = 0 try: - files = ek.ek(scandir, base_dir) + files = scandir(base_dir) except OSError as e: logger.log('Unable to count files %s / %s' % (repr(e), ex(e)), logger.WARNING) else: @@ -1603,8 +1601,8 @@ def upgrade_new_naming(): sickgear.CFG.setdefault('GUI', {})['fanart_ratings'] = '%s' % ne sickgear.CFG.write() - 
image_cache_dir = ek.ek(os.path.join, sickgear.CACHE_DIR, 'images') - bp_match = re.compile(r'(\d+)\.((?:banner|poster|(?:(?:\d+(?:\.\w*)?\.(?:\w{5,8}))\.)?fanart)\.jpg)', flags=re.I) + image_cache_dir = os.path.join(sickgear.CACHE_DIR, 'images') + bp_match = re.compile(r'(\d+)\.((?:banner|poster|(?:\d+(?:\.\w*)?\.\w{5,8}\.)?fanart)\.jpg)', flags=re.I) def _set_progress(p_msg, c, s): ps = None @@ -1618,14 +1616,14 @@ def upgrade_new_naming(): sickgear.classes.loading_msg.set_msg_progress(p_msg, '{:6.2f}%'.format(ps)) for d in ['', 'thumbnails']: - bd = ek.ek(os.path.join, image_cache_dir, d) - if ek.ek(os.path.isdir, bd): + bd = os.path.join(image_cache_dir, d) + if os.path.isdir(bd): fc, dc = count_files_dirs(bd) step = fc / float(100) cf = 0 p_text = 'Upgrading %s' % (d, 'banner/poster')[not d] _set_progress(p_text, 0, 0) - for entry in ek.ek(scandir, bd): + for entry in scandir(bd): if entry.is_file(): cf += 1 _set_progress(p_text, cf, step) @@ -1634,14 +1632,13 @@ def upgrade_new_naming(): old_id = int(b_s.group(1)) tvid = show_list.get(old_id) if tvid: - nb_dir = ek.ek(os.path.join, sickgear.CACHE_DIR, 'images', 'shows', - '%s-%s' % (tvid, old_id), d) - if not ek.ek(os.path.isdir, nb_dir): + nb_dir = os.path.join(sickgear.CACHE_DIR, 'images', 'shows', '%s-%s' % (tvid, old_id), d) + if not os.path.isdir(nb_dir): try: - ek.ek(os.makedirs, nb_dir) + os.makedirs(nb_dir) except (BaseException, Exception): pass - new_name = ek.ek(os.path.join, nb_dir, bp_match.sub(r'\2', entry.name)) + new_name = os.path.join(nb_dir, bp_match.sub(r'\2', entry.name)) try: move_file(entry.path, new_name) except (BaseException, Exception) as e: @@ -1650,7 +1647,7 @@ def upgrade_new_naming(): else: # clean up files without reference in db try: - ek.ek(os.remove, entry.path) + os.remove(entry.path) except (BaseException, Exception): pass elif entry.is_dir(): @@ -1664,7 +1661,7 @@ def upgrade_new_naming(): p_text = 'Upgrading fanart' _set_progress(p_text, 0, 0) try: - entries = ek.ek(scandir, entry.path) + entries = scandir(entry.path) except OSError as e: logger.log('Unable to stat dirs %s / %s' % (repr(e), ex(e)), logger.WARNING) continue @@ -1676,17 +1673,16 @@ def upgrade_new_naming(): if old_id: new_id = show_list.get(old_id) if new_id: - new_dir_name = ek.ek(os.path.join, sickgear.CACHE_DIR, 'images', 'shows', - '%s-%s' % (new_id, old_id), 'fanart') + new_dir_name = os.path.join(sickgear.CACHE_DIR, 'images', 'shows', + '%s-%s' % (new_id, old_id), 'fanart') try: move_file(d_entry.path, new_dir_name) except (BaseException, Exception) as e: logger.log('Unable to rename %s to %s: %s / %s' % (d_entry.path, new_dir_name, repr(e), ex(e)), logger.WARNING) - if ek.ek(os.path.isdir, new_dir_name): + if os.path.isdir(new_dir_name): try: - f_n = filter_iter(lambda fn: fn.is_file(), - ek.ek(scandir, new_dir_name)) + f_n = filter_iter(lambda fn: fn.is_file(), scandir(new_dir_name)) except OSError as e: logger.log('Unable to rename %s / %s' % (repr(e), ex(e)), logger.WARNING) @@ -1704,20 +1700,20 @@ def upgrade_new_naming(): (args[0], args[1], repr(e), ex(e)), logger.WARNING) else: try: - ek.ek(shutil.rmtree, d_entry.path) + shutil.rmtree(d_entry.path) except (BaseException, Exception): pass try: - ek.ek(shutil.rmtree, d_entry.path) + shutil.rmtree(d_entry.path) except (BaseException, Exception): pass try: - ek.ek(os.rmdir, entry.path) + os.rmdir(entry.path) except (BaseException, Exception): pass if 'thumbnails' == d: try: - ek.ek(os.rmdir, bd) + os.rmdir(bd) except (BaseException, Exception): pass _set_progress(p_text, 
0, 1) diff --git a/sickgear/image_cache.py b/sickgear/image_cache.py index 2f6b5b38..8648bd22 100644 --- a/sickgear/image_cache.py +++ b/sickgear/image_cache.py @@ -20,8 +20,6 @@ import os.path import re import zlib -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex import sickgear @@ -30,7 +28,6 @@ from . import db, logger from .metadata.generic import GenericMetadata from .sgdatetime import timestamp_near from .indexers.indexer_config import TVINFO_TVDB, TVINFO_TVMAZE, TVINFO_TMDB, TVINFO_IMDB -from lib.tvinfo_base.exceptions import * from six import itervalues, iteritems @@ -56,9 +53,9 @@ class ImageCache(object): characters_dir = None # type: Optional[AnyStr] def __init__(self): - if None is ImageCache.base_dir and ek.ek(os.path.exists, sickgear.CACHE_DIR): - ImageCache.base_dir = ek.ek(os.path.abspath, ek.ek(os.path.join, sickgear.CACHE_DIR, 'images')) - ImageCache.shows_dir = ek.ek(os.path.abspath, ek.ek(os.path.join, self.base_dir, 'shows')) + if None is ImageCache.base_dir and os.path.exists(sickgear.CACHE_DIR): + ImageCache.base_dir = os.path.abspath(os.path.join(sickgear.CACHE_DIR, 'images')) + ImageCache.shows_dir = os.path.abspath(os.path.join(self.base_dir, 'shows')) ImageCache.persons_dir = self._persons_dir() ImageCache.characters_dir = self._characters_dir() @@ -70,17 +67,17 @@ class ImageCache(object): # """ # Builds up the full path to the image cache directory # """ - # return ek.ek(os.path.abspath, ek.ek(os.path.join, sickgear.CACHE_DIR, 'images')) + # return os.path.abspath(os.path.join(sickgear.CACHE_DIR, 'images')) @staticmethod def _persons_dir(): # type: (...) -> AnyStr - return ek.ek(os.path.join, sickgear.CACHE_DIR, 'images', 'person') + return os.path.join(sickgear.CACHE_DIR, 'images', 'person') @staticmethod def _characters_dir(): # type: (...) 
-> AnyStr - return ek.ek(os.path.join, sickgear.CACHE_DIR, 'images', 'characters') + return os.path.join(sickgear.CACHE_DIR, 'images', 'characters') def _fanart_dir(self, tvid=None, prodid=None): # type: (int, int) -> AnyStr @@ -95,7 +92,7 @@ class ImageCache(object): :rtype: AnyStr or None """ if None not in (tvid, prodid): - return ek.ek(os.path.abspath, ek.ek(os.path.join, self.shows_dir, '%s-%s' % (tvid, prodid), 'fanart')) + return os.path.abspath(os.path.join(self.shows_dir, '%s-%s' % (tvid, prodid), 'fanart')) def _thumbnails_dir(self, tvid, prodid): # type: (int, int) -> AnyStr @@ -109,7 +106,7 @@ class ImageCache(object): :return: path :rtype: AnyStr """ - return ek.ek(os.path.abspath, ek.ek(os.path.join, self.shows_dir, '%s-%s' % (tvid, prodid), 'thumbnails')) + return os.path.abspath(os.path.join(self.shows_dir, '%s-%s' % (tvid, prodid), 'thumbnails')) @staticmethod def _person_base_name(person_obj): @@ -134,7 +131,7 @@ class ImageCache(object): :param base_path: """ filename = '%s.jpg' % base_path or self._person_base_name(person_obj) - return ek.ek(os.path.join, self.persons_dir, filename) + return os.path.join(self.persons_dir, filename) def person_thumb_path(self, person_obj, base_path=None): # type: (Optional[Person], AnyStr) -> AnyStr @@ -144,7 +141,7 @@ class ImageCache(object): :param base_path: """ filename = '%s_thumb.jpg' % base_path or self._person_base_name(person_obj) - return ek.ek(os.path.join, self.persons_dir, filename) + return os.path.join(self.persons_dir, filename) def person_both_paths(self, person_obj): # type: (Person) -> Tuple[AnyStr, AnyStr] @@ -164,7 +161,7 @@ class ImageCache(object): :param base_path: """ filename = '%s.jpg' % base_path or self._character_base_name(character_obj, show_obj) - return ek.ek(os.path.join, self.characters_dir, filename) + return os.path.join(self.characters_dir, filename) def character_thumb_path(self, character_obj, show_obj, base_path=None): # type: (Optional[Character], Optional[TVShow], AnyStr) -> AnyStr @@ -175,7 +172,7 @@ class ImageCache(object): :param base_path: """ filename = '%s_thumb.jpg' % base_path or self._character_base_name(character_obj, show_obj) - return ek.ek(os.path.join, self.characters_dir, filename) + return os.path.join(self.characters_dir, filename) def character_both_path(self, character_obj, show_obj=None, tvid=None, proid=None, person_obj=None): # type: (Character, TVShow, integer_types, integer_types, Person) -> Tuple[AnyStr, AnyStr] @@ -208,7 +205,7 @@ class ImageCache(object): :return: a full path to the cached poster file for the given tvid prodid :rtype: AnyStr """ - return ek.ek(os.path.join, self.shows_dir, '%s-%s' % (tvid, prodid), 'poster.jpg') + return os.path.join(self.shows_dir, '%s-%s' % (tvid, prodid), 'poster.jpg') def banner_path(self, tvid, prodid): # type: (int, int) -> AnyStr @@ -222,7 +219,7 @@ class ImageCache(object): :return: a full path to the cached banner file for the given tvid prodid :rtype: AnyStr """ - return ek.ek(os.path.join, self.shows_dir, '%s-%s' % (tvid, prodid), 'banner.jpg') + return os.path.join(self.shows_dir, '%s-%s' % (tvid, prodid), 'banner.jpg') def fanart_path(self, tvid, prodid, prefix=''): # type: (int, int, Optional[AnyStr]) -> AnyStr @@ -238,7 +235,7 @@ class ImageCache(object): :return: a full path to the cached fanart file for the given tvid prodid :rtype: AnyStr """ - return ek.ek(os.path.join, self._fanart_dir(tvid, prodid), '%s%s' % (prefix, 'fanart.jpg')) + return os.path.join(self._fanart_dir(tvid, prodid), '%s%s' % (prefix, 
'fanart.jpg')) def poster_thumb_path(self, tvid, prodid): # type: (int, int) -> AnyStr @@ -252,7 +249,7 @@ class ImageCache(object): :return: a full path to the cached poster file for the given tvid prodid :rtype: AnyStr """ - return ek.ek(os.path.join, self._thumbnails_dir(tvid, prodid), 'poster.jpg') + return os.path.join(self._thumbnails_dir(tvid, prodid), 'poster.jpg') def banner_thumb_path(self, tvid, prodid): # type: (int, int) -> AnyStr @@ -266,7 +263,7 @@ class ImageCache(object): :return: a full path to the cached poster file for the given tvid prodid :rtype: AnyStr """ - return ek.ek(os.path.join, self._thumbnails_dir(tvid, prodid), 'banner.jpg') + return os.path.join(self._thumbnails_dir(tvid, prodid), 'banner.jpg') @staticmethod def has_file(image_file): @@ -278,8 +275,8 @@ class ImageCache(object): :rtype: bool """ result = [] - for filename in ek.ek(glob.glob, image_file): - result.append(ek.ek(os.path.isfile, filename) and filename) + for filename in glob.glob(image_file): + result.append(os.path.isfile(filename) and filename) logger.log(u'Found cached %s' % filename, logger.DEBUG) not any(result) and logger.log(u'No cache for %s' % image_file, logger.DEBUG) @@ -367,7 +364,7 @@ class ImageCache(object): :param image: image file or data :param is_binary: is data instead of path """ - if not is_binary and not ek.ek(os.path.isfile, image): + if not is_binary and not os.path.isfile(image): logger.warning(u'File not found to determine image type of %s' % image) return if not image: @@ -540,7 +537,7 @@ class ImageCache(object): else: sg_helpers.copy_file(image_path, dest_path) - return ek.ek(os.path.isfile, dest_path) and dest_path or None + return os.path.isfile(dest_path) and dest_path or None def _cache_info_source_images(self, show_obj, img_type, num_files=0, max_files=500, force=False, show_infos=None): # type: (TVShow, int, int, int, bool, ShowInfosDict) -> bool @@ -588,7 +585,7 @@ class ImageCache(object): return False crcs = [] - for cache_file_name in ek.ek(glob.glob, dest_path): + for cache_file_name in glob.glob(dest_path): with open(cache_file_name, mode='rb') as resource: crc = '%05X' % (zlib.crc32(resource.read()) & 0xFFFFFFFF) if crc not in crcs: @@ -627,7 +624,7 @@ class ImageCache(object): success += (0, 1)[result] if num_files > max_files: break - total = len(ek.ek(glob.glob, dest_path)) + total = len(glob.glob(dest_path)) logger.log(u'Saved %s fanart images%s. 
Cached %s of max %s fanart file%s' % (success, ('', ' from ' + ', '.join([x for x in list(set(sources))]))[0 < len(sources)], @@ -696,7 +693,7 @@ class ImageCache(object): cache_path = self.fanart_path(*arg_tvid_prodid).replace('fanart.jpg', '') # num_images = len(fnmatch.filter(os.listdir(cache_path), '*.jpg')) - for cache_dir in ek.ek(glob.glob, cache_path): + for cache_dir in glob.glob(cache_path): if show_obj.tvid_prodid in sickgear.FANART_RATINGS: del (sickgear.FANART_RATINGS[show_obj.tvid_prodid]) result = sg_helpers.remove_file(cache_dir, tree=True) @@ -712,11 +709,11 @@ class ImageCache(object): needed = [] if any([need_images[self.POSTER], need_images[self.BANNER]]): poster_path = cur_provider.get_poster_path(show_obj) - if poster_path not in checked_files and ek.ek(os.path.isfile, poster_path): + if poster_path not in checked_files and os.path.isfile(poster_path): needed += [[False, poster_path]] if need_images[self.FANART]: fanart_path = cur_provider.get_fanart_path(show_obj) - if fanart_path not in checked_files and ek.ek(os.path.isfile, fanart_path): + if fanart_path not in checked_files and os.path.isfile(fanart_path): needed += [[True, fanart_path]] if 0 == len(needed): break diff --git a/sickgear/indexers/indexer_api.py b/sickgear/indexers/indexer_api.py index 3d3e7b64..530faa96 100644 --- a/sickgear/indexers/indexer_api.py +++ b/sickgear/indexers/indexer_api.py @@ -16,10 +16,9 @@ import os from .indexer_config import init_config, tvinfo_config -from sg_helpers import make_path, proxy_setting +from sg_helpers import proxy_setting import sickgear from lib.tvinfo_base import TVInfoBase -import encodingKludge as ek from _23 import list_values @@ -41,8 +40,7 @@ class TVInfoAPI(object): if tvinfo_config[self.tvid]['active'] or ('no_dummy' in kwargs and True is kwargs['no_dummy']): if 'no_dummy' in kwargs: kwargs.pop('no_dummy') - indexer_cache_dir = ek.ek(os.path.join, sickgear.CACHE_DIR, 'tvinfo_cache', - tvinfo_config[self.tvid]['name']) + indexer_cache_dir = os.path.join(sickgear.CACHE_DIR, 'tvinfo_cache', tvinfo_config[self.tvid]['name']) kwargs['diskcache_dir'] = indexer_cache_dir return tvinfo_config[self.tvid]['module'](*args, **kwargs) else: diff --git a/sickgear/logger.py b/sickgear/logger.py index 3aa6791d..39821266 100644 --- a/sickgear/logger.py +++ b/sickgear/logger.py @@ -337,9 +337,8 @@ class TimedCompressedRotatingFileHandler(TimedRotatingFileHandler): except AttributeError: pass - import encodingKludge try: - encodingKludge.ek(os.rename, self.baseFilename, dfn) + os.rename(self.baseFilename, dfn) except (BaseException, Exception): pass @@ -360,9 +359,8 @@ class TimedCompressedRotatingFileHandler(TimedRotatingFileHandler): if 0 < self.backupCount: # find the oldest log file and delete it # phase out files named sickgear.log in favour of sickgear.logs over backup_count days - all_names = encodingKludge.ek(glob.glob, file_name + '_*') + \ - encodingKludge.ek(glob.glob, encodingKludge.ek(os.path.join, encodingKludge.ek( - os.path.dirname, file_name), 'sickbeard_*')) + all_names = glob.glob(file_name + '_*') \ + + glob.glob(os.path.join(os.path.dirname(file_name), 'sickbeard_*')) if len(all_names) > self.backupCount: all_names.sort() self.delete_logfile(all_names[0]) diff --git a/sickgear/metadata/generic.py b/sickgear/metadata/generic.py index a182a389..add8865d 100644 --- a/sickgear/metadata/generic.py +++ b/sickgear/metadata/generic.py @@ -30,8 +30,6 @@ from ..indexers.indexer_config import TVINFO_TVDB, TVINFO_TMDB from lib.tvinfo_base import TVInfoImage, 
TVInfoImageType, TVInfoImageSize from lib.tvinfo_base.exceptions import * import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex from lib.fanart.core import Request as fanartRequest import lib.fanart as fanart @@ -127,13 +125,13 @@ class GenericMetadata(object): def get_id(self): # type: (...) -> AnyStr - return GenericMetadata.makeID(self.name) + return GenericMetadata.make_id(self.name) @staticmethod - def makeID(name): + def make_id(name): # type: (AnyStr) -> AnyStr name_id = re.sub("[+]", "plus", name) - name_id = re.sub(r"[^\w\d_]", "_", name_id).lower() + name_id = re.sub(r"[^\w_]", "_", name_id).lower() return name_id def set_config(self, string): @@ -151,69 +149,69 @@ class GenericMetadata(object): self.season_all_banner = config_list[9] def _has_show_metadata(self, show_obj): - # type: (sickgear.tv.TVShow) -> AnyStr - result = ek.ek(os.path.isfile, self.get_show_file_path(show_obj)) + # type: (sickgear.tv.TVShow) -> bool + result = os.path.isfile(self.get_show_file_path(show_obj)) logger.log(u"Checking if " + self.get_show_file_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def has_episode_metadata(self, ep_obj): - # type: (sickgear.tv.TVEpisode) -> AnyStr - result = ek.ek(os.path.isfile, self.get_episode_file_path(ep_obj)) + # type: (sickgear.tv.TVEpisode) -> bool + result = os.path.isfile(self.get_episode_file_path(ep_obj)) logger.log(u"Checking if " + self.get_episode_file_path(ep_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_fanart(self, show_obj): - # type: (sickgear.tv.TVShow) -> AnyStr - result = ek.ek(os.path.isfile, self.get_fanart_path(show_obj)) + # type: (sickgear.tv.TVShow) -> bool + result = os.path.isfile(self.get_fanart_path(show_obj)) logger.log(u"Checking if " + self.get_fanart_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_poster(self, show_obj): - # type: (sickgear.tv.TVShow) -> AnyStr - result = ek.ek(os.path.isfile, self.get_poster_path(show_obj)) + # type: (sickgear.tv.TVShow) -> bool + result = os.path.isfile(self.get_poster_path(show_obj)) logger.log(u"Checking if " + self.get_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_banner(self, show_obj): - # type: (sickgear.tv.TVShow) -> AnyStr - result = ek.ek(os.path.isfile, self.get_banner_path(show_obj)) + # type: (sickgear.tv.TVShow) -> bool + result = os.path.isfile(self.get_banner_path(show_obj)) logger.log(u"Checking if " + self.get_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def has_episode_thumb(self, ep_obj): - # type: (sickgear.tv.TVEpisode) -> AnyStr + # type: (sickgear.tv.TVEpisode) -> bool location = self.get_episode_thumb_path(ep_obj) - result = None is not location and ek.ek(os.path.isfile, location) + result = None is not location and os.path.isfile(location) if location: logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) return result def _has_season_poster(self, show_obj, season): - # type: (sickgear.tv.TVShow,int) -> AnyStr + # type: (sickgear.tv.TVShow,int) -> bool location = self.get_season_poster_path(show_obj, season) - result = None is not location and ek.ek(os.path.isfile, location) + result = None is not location and os.path.isfile(location) if location: logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) return result def _has_season_banner(self, show_obj, season): - # type: (sickgear.tv.TVShow,int) -> AnyStr + # type: 
(sickgear.tv.TVShow,int) -> bool location = self.get_season_banner_path(show_obj, season) - result = None is not location and ek.ek(os.path.isfile, location) + result = None is not location and os.path.isfile(location) if location: logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) return result def _has_season_all_poster(self, show_obj): - # type: (sickgear.tv.TVShow) -> AnyStr - result = ek.ek(os.path.isfile, self.get_season_all_poster_path(show_obj)) + # type: (sickgear.tv.TVShow) -> bool + result = os.path.isfile(self.get_season_all_poster_path(show_obj)) logger.log(u"Checking if " + self.get_season_all_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_season_all_banner(self, show_obj): - # type: (sickgear.tv.TVShow) -> AnyStr - result = ek.ek(os.path.isfile, self.get_season_all_banner_path(show_obj)) + # type: (sickgear.tv.TVShow) -> bool + result = os.path.isfile(self.get_season_all_banner_path(show_obj)) logger.log(u"Checking if " + self.get_season_all_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result @@ -245,7 +243,7 @@ class GenericMetadata(object): def get_show_file_path(self, show_obj): # type: (sickgear.tv.TVShow) -> AnyStr - return ek.ek(os.path.join, show_obj.location, self._show_metadata_filename) + return os.path.join(show_obj.location, self._show_metadata_filename) def get_episode_file_path(self, ep_obj): # type: (sickgear.tv.TVEpisode) -> AnyStr @@ -253,15 +251,15 @@ class GenericMetadata(object): def get_fanart_path(self, show_obj): # type: (sickgear.tv.TVShow) -> AnyStr - return ek.ek(os.path.join, show_obj.location, self.fanart_name) + return os.path.join(show_obj.location, self.fanart_name) def get_poster_path(self, show_obj): # type: (sickgear.tv.TVShow) -> AnyStr - return ek.ek(os.path.join, show_obj.location, self.poster_name) + return os.path.join(show_obj.location, self.poster_name) def get_banner_path(self, show_obj): # type: (sickgear.tv.TVShow) -> AnyStr - return ek.ek(os.path.join, show_obj.location, self.banner_name) + return os.path.join(show_obj.location, self.banner_name) def get_episode_thumb_path(self, ep_obj): # type: (sickgear.tv.TVEpisode) -> Optional[AnyStr] @@ -269,7 +267,7 @@ class GenericMetadata(object): Returns the path where the episode thumbnail should be stored. 
ep_obj: a TVEpisode instance for which to create the thumbnail """ - if ek.ek(os.path.isfile, ep_obj.location): + if os.path.isfile(ep_obj.location): tbn_filename = ep_obj.location.rpartition('.') @@ -296,7 +294,7 @@ class GenericMetadata(object): else: season_poster_filename = 'season' + str(season).zfill(2) - return ek.ek(os.path.join, show_obj.location, season_poster_filename + '-poster.jpg') + return os.path.join(show_obj.location, season_poster_filename + '-poster.jpg') def get_season_banner_path(self, show_obj, season): # type: (sickgear.tv.TVShow, int) -> AnyStr @@ -314,15 +312,15 @@ class GenericMetadata(object): else: season_banner_filename = 'season' + str(season).zfill(2) - return ek.ek(os.path.join, show_obj.location, season_banner_filename + '-banner.jpg') + return os.path.join(show_obj.location, season_banner_filename + '-banner.jpg') def get_season_all_poster_path(self, show_obj): # type: (sickgear.tv.TVShow) -> AnyStr - return ek.ek(os.path.join, show_obj.location, self.season_all_poster_name) + return os.path.join(show_obj.location, self.season_all_poster_name) def get_season_all_banner_path(self, show_obj): # type: (sickgear.tv.TVShow) -> AnyStr - return ek.ek(os.path.join, show_obj.location, self.season_all_banner_name) + return os.path.join(show_obj.location, self.season_all_banner_name) def _show_data(self, show_obj): # type: (sickgear.tv.TVShow) -> Optional[Union[bool, etree.Element]] @@ -393,7 +391,7 @@ class GenericMetadata(object): self.name, show_obj.unique_name)) nfo_file_path = self.get_show_file_path(show_obj) - with ek.ek(io.open, nfo_file_path, 'r', encoding='utf8') as xmlFileObj: + with io.open(nfo_file_path, 'r', encoding='utf8') as xmlFileObj: show_xml = etree.ElementTree(file=xmlFileObj) tvid = show_xml.find('indexer') @@ -821,7 +819,7 @@ class GenericMetadata(object): """ # don't bother overwriting it - if not force and ek.ek(os.path.isfile, image_path): + if not force and os.path.isfile(image_path): logger.log(u"Image already exists, not downloading", logger.DEBUG) return False @@ -829,17 +827,17 @@ class GenericMetadata(object): logger.log(u"Unable to retrieve image, skipping", logger.WARNING) return False - image_dir = ek.ek(os.path.dirname, image_path) + image_dir = os.path.dirname(image_path) try: - if not ek.ek(os.path.isdir, image_dir): + if not os.path.isdir(image_dir): logger.log(u"Metadata dir didn't exist, creating it at " + image_dir, logger.DEBUG) - ek.ek(os.makedirs, image_dir) + os.makedirs(image_dir) sg_helpers.chmod_as_parent(image_dir) - outFile = ek.ek(open, image_path, 'wb') - outFile.write(image_data) - outFile.close() + out_file = open(image_path, 'wb') + out_file.write(image_data) + out_file.close() sg_helpers.chmod_as_parent(image_path) except IOError as e: logger.log( @@ -903,13 +901,13 @@ class GenericMetadata(object): try: alt_url = '%swww.%s%s' % re.findall( - r'(https?://)(?:artworks\.)?(thetvdb\.[^/]+/banners/[^\d]+[^.]+)(?:_t)(.*)', _url)[0][0:3] + r'(https?://)(?:artworks\.)?(thetvdb\.[^/]+/banners/\D+[^.]+)_t(.*)', _url)[0][0:3] if alt_url not in _urls[0]: _urls[1].append(alt_url) except (IndexError, Exception): try: alt_url = '%sartworks.%s_t%s' % re.findall( - r'(https?://)(?:www\.)?(thetvdb\.[^/]+/banners/[^\d]+[^.]+)(.*)', _url)[0][0:3] + r'(https?://)(?:www\.)?(thetvdb\.[^/]+/banners/\D+[^.]+)(.*)', _url)[0][0:3] if alt_url not in _urls[0]: _urls[1].append(alt_url) except (IndexError, Exception): @@ -1010,7 +1008,7 @@ class GenericMetadata(object): thumb_url = _de_dupe(thumb_url) if not thumb_url: thumb_url = img_url 
- yield (img_url, thumb_url) + yield img_url, thumb_url elif img_url: yield img_url @@ -1113,7 +1111,7 @@ class GenericMetadata(object): return result - def retrieveShowMetadata(self, folder): + def retrieve_show_metadata(self, folder): # type: (AnyStr) -> Union[Tuple[int, int, AnyStr], Tuple[None, None, None]] """ Used only when mass adding Existing Shows, @@ -1124,39 +1122,39 @@ class GenericMetadata(object): empty_return = (None, None, None) - metadata_path = ek.ek(os.path.join, folder, self._show_metadata_filename) + metadata_path = os.path.join(folder, self._show_metadata_filename) - if not ek.ek(os.path.isdir, folder) or not ek.ek(os.path.isfile, metadata_path): + if not os.path.isdir(folder) or not os.path.isfile(metadata_path): logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG) return empty_return logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG) try: - with ek.ek(io.open, metadata_path, 'r', encoding='utf8') as xmlFileObj: - showXML = etree.ElementTree(file=xmlFileObj) + with io.open(metadata_path, 'r', encoding='utf8') as xmlFileObj: + show_xml = etree.ElementTree(file=xmlFileObj) - if None is showXML.findtext('title') \ - or all(None is _f for _f in (showXML.find('//uniqueid[@type]'), - showXML.findtext('tvdbid'), - showXML.findtext('id'), - showXML.findtext('indexer'))): + if None is show_xml.findtext('title') \ + or all(None is _f for _f in (show_xml.find('//uniqueid[@type]'), + show_xml.findtext('tvdbid'), + show_xml.findtext('id'), + show_xml.findtext('indexer'))): logger.log(u"Invalid info in tvshow.nfo (missing name or id):" - + str(showXML.findtext('title')) + ' ' - + str(showXML.findtext('indexer')) + ' ' - + str(showXML.findtext('tvdbid')) + ' ' - + str(showXML.findtext('id'))) + + str(show_xml.findtext('title')) + ' ' + + str(show_xml.findtext('indexer')) + ' ' + + str(show_xml.findtext('tvdbid')) + ' ' + + str(show_xml.findtext('id'))) return empty_return - name = showXML.findtext('title') + name = show_xml.findtext('title') try: - tvid = int(showXML.findtext('indexer')) + tvid = int(show_xml.findtext('indexer')) except (BaseException, Exception): tvid = None # handle v2 format of .nfo file - default_source = showXML.find('//uniqueid[@default="true"]') + default_source = show_xml.find('//uniqueid[@default="true"]') if None is not default_source: use_tvid = default_source.attrib.get('type') or tvid if isinstance(use_tvid, string_types): @@ -1166,17 +1164,17 @@ class GenericMetadata(object): if use_tvid and None is not prodid: return use_tvid, prodid, name - prodid = showXML.find('//uniqueid[@type="tvdb"]') + prodid = show_xml.find('//uniqueid[@type="tvdb"]') if None is not prodid: prodid = int(prodid.text) tvid = TVINFO_TVDB - elif None is not showXML.findtext('tvdbid'): - prodid = int(showXML.findtext('tvdbid')) + elif None is not show_xml.findtext('tvdbid'): + prodid = int(show_xml.findtext('tvdbid')) tvid = TVINFO_TVDB - elif None is not showXML.findtext('id'): - prodid = int(showXML.findtext('id')) + elif None is not show_xml.findtext('id'): + prodid = int(show_xml.findtext('id')) try: - tvid = TVINFO_TVDB if [s for s in showXML.findall('.//*') + tvid = TVINFO_TVDB if [s for s in show_xml.findall('.//*') if s.text and -1 != s.text.find('thetvdb.com')] else tvid except (BaseException, Exception): pass diff --git a/sickgear/metadata/kodi.py b/sickgear/metadata/kodi.py index 15f0e3cc..e679ebeb 100644 --- a/sickgear/metadata/kodi.py +++ b/sickgear/metadata/kodi.py @@ -25,8 +25,6 @@ 
import sg_helpers from ..indexers.indexer_config import TVINFO_IMDB, TVINFO_TVDB from lib.tvinfo_base.exceptions import * import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex from lxml_etree import etree @@ -472,8 +470,8 @@ def remove_default_attr(*args, **kwargs): if nfo_path: # show try: - if ek.ek(os.path.isfile, nfo_path): - with ek.ek(io.open, nfo_path, 'r', encoding='utf8') as xml_file_obj: + if os.path.isfile(nfo_path): + with io.open(nfo_path, 'r', encoding='utf8') as xml_file_obj: xmltree = etree.ElementTree(file=xml_file_obj) # remove default="" attributes @@ -519,8 +517,8 @@ def remove_default_attr(*args, **kwargs): try: changed = False nfo_path = kodi.get_episode_file_path(cur_ep_obj) - if nfo_path and ek.ek(os.path.isfile, nfo_path): - with ek.ek(io.open, nfo_path, 'r', encoding='utf8') as xml_file_obj: + if nfo_path and os.path.isfile(nfo_path): + with io.open(nfo_path, 'r', encoding='utf8') as xml_file_obj: xmltree = etree.ElementTree(file=xml_file_obj) # remove default="" attributes @@ -573,8 +571,8 @@ def rebuild_nfo(*args, **kwargs): try: nfo_path = kodi.get_show_file_path(cur_show_obj) - if nfo_path and ek.ek(os.path.isfile, nfo_path): - with ek.ek(io.open, nfo_path, 'r', encoding='utf8') as xml_file_obj: + if nfo_path and os.path.isfile(nfo_path): + with io.open(nfo_path, 'r', encoding='utf8') as xml_file_obj: xmltree = etree.ElementTree(file=xml_file_obj) # check xml keys exist to validate file as type Kodi episode or tvshow .nfo diff --git a/sickgear/metadata/mediabrowser.py b/sickgear/metadata/mediabrowser.py index ad73b059..d3a2947a 100644 --- a/sickgear/metadata/mediabrowser.py +++ b/sickgear/metadata/mediabrowser.py @@ -24,8 +24,6 @@ from .. import logger import sg_helpers from lib.tvinfo_base.exceptions import * import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex from lxml_etree import etree @@ -98,7 +96,7 @@ class MediaBrowserMetadata(generic.GenericMetadata): self.eg_season_all_banner = "not supported" # type: AnyStr # Override with empty methods for unsupported features - def retrieveShowMetadata(self, folder): + def retrieve_show_metadata(self, folder): # type: (AnyStr) -> Tuple[None, None, None] # while show metadata is generated, it is not supported for our lookup return None, None, None @@ -120,10 +118,10 @@ class MediaBrowserMetadata(generic.GenericMetadata): ep_obj: a TVEpisode object to get the path for """ - if ek.ek(os.path.isfile, ep_obj.location): - xml_file_name = sg_helpers.replace_extension(ek.ek(os.path.basename, ep_obj.location), self._ep_nfo_extension) - metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), 'metadata') - xml_file_path = ek.ek(os.path.join, metadata_dir_name, xml_file_name) + if os.path.isfile(ep_obj.location): + xml_file_name = sg_helpers.replace_extension(os.path.basename(ep_obj.location), self._ep_nfo_extension) + metadata_dir_name = os.path.join(os.path.dirname(ep_obj.location), 'metadata') + xml_file_path = os.path.join(metadata_dir_name, xml_file_name) else: logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG) return '' @@ -139,10 +137,10 @@ class MediaBrowserMetadata(generic.GenericMetadata): ep_obj: a TVEpisode object to get the path from """ - if ek.ek(os.path.isfile, ep_obj.location): - metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), 'metadata') - tbn_file_name = 
sg_helpers.replace_extension(ek.ek(os.path.basename, ep_obj.location), 'jpg') - return ek.ek(os.path.join, metadata_dir_name, tbn_file_name) + if os.path.isfile(ep_obj.location): + metadata_dir_name = os.path.join(os.path.dirname(ep_obj.location), 'metadata') + tbn_file_name = sg_helpers.replace_extension(os.path.basename(ep_obj.location), 'jpg') + return os.path.join(metadata_dir_name, tbn_file_name) def get_season_poster_path(self, show_obj, season): # type: (sickgear.tv.TVShow, int) -> Optional[AnyStr] @@ -152,8 +150,7 @@ class MediaBrowserMetadata(generic.GenericMetadata): If no season folder exists, None is returned """ - dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if - ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))] + dir_list = [x for x in os.listdir(show_obj.location) if os.path.isdir(os.path.join(show_obj.location, x))] season_dir_regex = r'^Season\s+(\d+)$' @@ -183,7 +180,7 @@ class MediaBrowserMetadata(generic.GenericMetadata): logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG) - return ek.ek(os.path.join, show_obj.location, season_dir, 'folder.jpg') + return os.path.join(show_obj.location, season_dir, 'folder.jpg') def get_season_banner_path(self, show_obj, season): # type: (sickgear.tv.TVShow, int) -> Optional[AnyStr] @@ -193,8 +190,7 @@ class MediaBrowserMetadata(generic.GenericMetadata): If no season folder exists, None is returned """ - dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if - ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))] + dir_list = [x for x in os.listdir(show_obj.location) if os.path.isdir(os.path.join(show_obj.location, x))] season_dir_regex = r'^Season\s+(\d+)$' @@ -224,7 +220,7 @@ class MediaBrowserMetadata(generic.GenericMetadata): logger.log(u"Using " + str(season_dir) + "/banner.jpg as season dir for season " + str(season), logger.DEBUG) - return ek.ek(os.path.join, show_obj.location, season_dir, 'banner.jpg') + return os.path.join(show_obj.location, season_dir, 'banner.jpg') def _show_data(self, show_obj): # type: (sickgear.tv.TVShow) -> Optional[Union[bool, etree.Element]] diff --git a/sickgear/metadata/ps3.py b/sickgear/metadata/ps3.py index 4e91cdb2..8941cbc8 100644 --- a/sickgear/metadata/ps3.py +++ b/sickgear/metadata/ps3.py @@ -17,8 +17,6 @@ import os from . import generic -# noinspection PyPep8Naming -import encodingKludge as ek import sickgear # noinspection PyUnreachableCode @@ -79,7 +77,7 @@ class PS3Metadata(generic.GenericMetadata): self.eg_season_all_banner = "not supported" # type: AnyStr # Override with empty methods for unsupported features - def retrieveShowMetadata(self, folder): + def retrieve_show_metadata(self, folder): # type: (AnyStr) -> Tuple[None, None, None] # no show metadata generated, we abort this lookup function return None, None, None @@ -132,7 +130,7 @@ class PS3Metadata(generic.GenericMetadata): ep_obj: a TVEpisode instance for which to create the thumbnail """ - if ek.ek(os.path.isfile, ep_obj.location): + if os.path.isfile(ep_obj.location): tbn_filename = ep_obj.location + ".cover.jpg" else: return None diff --git a/sickgear/metadata/tivo.py b/sickgear/metadata/tivo.py index 4a3f78e9..eced781d 100644 --- a/sickgear/metadata/tivo.py +++ b/sickgear/metadata/tivo.py @@ -25,8 +25,6 @@ from .. 
import logger import sg_helpers from lib.tvinfo_base.exceptions import * import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex @@ -89,7 +87,7 @@ class TIVOMetadata(generic.GenericMetadata): self.eg_season_all_banner = "not supported" # type: AnyStr # Override with empty methods for unsupported features - def retrieveShowMetadata(self, folder): + def retrieve_show_metadata(self, folder): # type: (AnyStr) -> Tuple[None, None, None] # no show metadata generated, we abort this lookup function return None, None, None @@ -155,10 +153,10 @@ class TIVOMetadata(generic.GenericMetadata): ep_obj: a TVEpisode object to get the path for """ - if ek.ek(os.path.isfile, ep_obj.location): - metadata_file_name = ek.ek(os.path.basename, ep_obj.location) + "." + self._ep_nfo_extension - metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), '.meta') - metadata_file_path = ek.ek(os.path.join, metadata_dir_name, metadata_file_name) + if os.path.isfile(ep_obj.location): + metadata_file_name = os.path.basename(ep_obj.location) + "." + self._ep_nfo_extension + metadata_dir_name = os.path.join(os.path.dirname(ep_obj.location), '.meta') + metadata_file_path = os.path.join(metadata_dir_name, metadata_file_name) else: logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG) return '' @@ -335,17 +333,17 @@ class TIVOMetadata(generic.GenericMetadata): return False nfo_file_path = self.get_episode_file_path(ep_obj) - nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path) + nfo_file_dir = os.path.dirname(nfo_file_path) try: - if not ek.ek(os.path.isdir, nfo_file_dir): + if not os.path.isdir(nfo_file_dir): logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG) - ek.ek(os.makedirs, nfo_file_dir) + os.makedirs(nfo_file_dir) sg_helpers.chmod_as_parent(nfo_file_dir) logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG) - with ek.ek(open, nfo_file_path, 'w') as nfo_file: + with open(nfo_file_path, 'w') as nfo_file: # Calling encode directly, b/c often descriptions have wonky characters. nfo_file.write(data.encode("utf-8")) diff --git a/sickgear/metadata/wdtv.py b/sickgear/metadata/wdtv.py index 97ae9611..0864e43d 100644 --- a/sickgear/metadata/wdtv.py +++ b/sickgear/metadata/wdtv.py @@ -24,8 +24,6 @@ from .. 
import logger import sg_helpers from lib.tvinfo_base.exceptions import * import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex from lxml_etree import etree @@ -92,7 +90,7 @@ class WDTVMetadata(generic.GenericMetadata): self.eg_season_all_banner = "not supported" # type: AnyStr # Override with empty methods for unsupported features - def retrieveShowMetadata(self, folder): + def retrieve_show_metadata(self, folder): # type: (AnyStr) -> Tuple[None, None, None] # no show metadata generated, we abort this lookup function return None, None, None @@ -137,7 +135,7 @@ class WDTVMetadata(generic.GenericMetadata): ep_obj: a TVEpisode instance for which to create the thumbnail """ - if ek.ek(os.path.isfile, ep_obj.location): + if os.path.isfile(ep_obj.location): return sg_helpers.replace_extension(ep_obj.location, 'metathumb') def get_season_poster_path(self, show_obj, season): @@ -148,8 +146,7 @@ class WDTVMetadata(generic.GenericMetadata): If no season folder exists, None is returned """ - dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if - ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))] + dir_list = [x for x in os.listdir(show_obj.location) if os.path.isdir(os.path.join(show_obj.location, x))] season_dir_regex = r'^Season\s+(\d+)$' @@ -176,7 +173,7 @@ class WDTVMetadata(generic.GenericMetadata): logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG) - return ek.ek(os.path.join, show_obj.location, season_dir, 'folder.jpg') + return os.path.join(show_obj.location, season_dir, 'folder.jpg') def _ep_data(self, ep_obj): # type: (sickgear.tv.TVEpisode) -> Optional[Union[bool, etree.Element]] diff --git a/sickgear/metadata/xbmc.py b/sickgear/metadata/xbmc.py index 99445335..ae5de5a3 100644 --- a/sickgear/metadata/xbmc.py +++ b/sickgear/metadata/xbmc.py @@ -20,8 +20,6 @@ import os from . import generic, xbmc_12plus import sg_helpers import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek # noinspection PyUnreachableCode if False: @@ -104,7 +102,7 @@ class XBMCMetadata(xbmc_12plus.XBMC12PlusMetadata): ep_obj: a TVEpisode instance for which to create the thumbnail """ - if ek.ek(os.path.isfile, ep_obj.location): + if os.path.isfile(ep_obj.location): tbn_filename = sg_helpers.replace_extension(ep_obj.location, 'tbn') else: return None @@ -127,7 +125,7 @@ class XBMCMetadata(xbmc_12plus.XBMC12PlusMetadata): else: season_poster_filename = 'season' + str(season).zfill(2) - return ek.ek(os.path.join, show_obj.location, season_poster_filename + '.tbn') + return os.path.join(show_obj.location, season_poster_filename + '.tbn') # present a standard "interface" from the module diff --git a/sickgear/name_parser/parser.py b/sickgear/name_parser/parser.py index 676a878d..8d63bb59 100644 --- a/sickgear/name_parser/parser.py +++ b/sickgear/name_parser/parser.py @@ -32,8 +32,6 @@ except ImportError: regex = None from . import regexes -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex import sickgear from .. 
import common, db, helpers, logger, scene_exceptions, scene_numbering @@ -381,7 +379,7 @@ class NameParser(object): season_number = int(ep_obj['seasonnumber']) episode_numbers = [int(ep_obj['episodenumber'])] - except BaseTVinfoEpisodenotfound as e: + except BaseTVinfoEpisodenotfound: logger.warning(u'Unable to find episode with date %s for show %s, skipping' % (best_result.air_date, show_obj.unique_name)) episode_numbers = [] @@ -581,7 +579,7 @@ class NameParser(object): return cached # break it into parts if there are any (dirname, file name, extension) - dir_name, file_name = ek.ek(os.path.split, name) + dir_name, file_name = os.path.split(name) if self.file_name: base_file_name = helpers.remove_extension(file_name) @@ -596,7 +594,7 @@ class NameParser(object): file_name_result = self._parse_string(base_file_name) # use only the direct parent dir - dir_name = ek.ek(os.path.basename, dir_name) + dir_name = os.path.basename(dir_name) # parse the dirname for extra info if needed dir_name_result = self._parse_string(dir_name) diff --git a/sickgear/naming.py b/sickgear/naming.py index 0bddae7d..9a24e43c 100644 --- a/sickgear/naming.py +++ b/sickgear/naming.py @@ -22,9 +22,6 @@ from . import common, logger, tv from .common import Quality, DOWNLOADED from .name_parser.parser import NameParser -# noinspection PyPep8Naming -import encodingKludge as ek - # noinspection PyUnreachableCode if False: from typing import AnyStr, Dict, List @@ -239,7 +236,7 @@ def validate_name(pattern, multi=None, anime_type=None, file_only=False, abd=Fal new_name = u'%s.ext' % sample_ep_obj.formatted_filename(pattern, multi, anime_type) new_path = sample_ep_obj.formatted_dir(pattern, multi) if not file_only: - new_name = ek.ek(os.path.join, new_path, new_name) + new_name = os.path.join(new_path, new_name) if not new_name: logger.log(u'Unable to create a name out of %s' % pattern, logger.DEBUG) diff --git a/sickgear/network_timezones.py b/sickgear/network_timezones.py index 9dda4d22..04c70aef 100644 --- a/sickgear/network_timezones.py +++ b/sickgear/network_timezones.py @@ -25,8 +25,6 @@ import sickgear from . 
import db, helpers, logger from sg_helpers import int_to_time -# noinspection PyPep8Naming -import encodingKludge as ek from lib.dateutil import tz, zoneinfo from lib.tzlocal import get_localzone @@ -126,8 +124,8 @@ def get_utc(): pass if isinstance(utc, datetime.tzinfo): return utc - tz_utc_file = ek.ek(os.path.join, ek.ek(os.path.dirname, zoneinfo.__file__), 'Greenwich') - if ek.ek(os.path.isfile, tz_utc_file): + tz_utc_file = os.path.join(os.path.dirname(zoneinfo.__file__), 'Greenwich') + if os.path.isfile(tz_utc_file): return tz.tzfile(tz_utc_file) @@ -154,7 +152,7 @@ def _remove_old_zoneinfo(): """ if None is not zoneinfo.ZONEFILENAME: current_file = helpers.real_path( - ek.ek(os.path.join, sickgear.ZONEINFO_DIR, ek.ek(os.path.basename, zoneinfo.ZONEFILENAME))) + os.path.join(sickgear.ZONEINFO_DIR, os.path.basename(zoneinfo.ZONEFILENAME))) for entry in chain.from_iterable([scantree(helpers.real_path(_dir), include=r'\.tar\.gz$', filter_kind=False) for _dir in (sickgear.ZONEINFO_DIR, )]): # type: DirEntry if current_file != entry.path: @@ -192,9 +190,9 @@ def _update_zoneinfo(): current_file = zoneinfo.ZONEFILENAME if None is not current_file: - current_file = ek.ek(os.path.basename, current_file) - zonefile = helpers.real_path(ek.ek(os.path.join, sickgear.ZONEINFO_DIR, current_file)) - zonemetadata = None if not ek.ek(os.path.isfile, zonefile) else \ + current_file = os.path.basename(current_file) + zonefile = helpers.real_path(os.path.join(sickgear.ZONEINFO_DIR, current_file)) + zonemetadata = None if not os.path.isfile(zonefile) else \ zoneinfo.ZoneInfoFile(zoneinfo.getzoneinfofile_stream()).metadata newtz_regex = re.search(r'(\d{4}[^.]+)', new_zoneinfo) @@ -220,7 +218,7 @@ def _update_zoneinfo(): if not helpers.download_file(url_tar, zonefile_tmp): return - if not ek.ek(os.path.exists, zonefile_tmp): + if not os.path.exists(zonefile_tmp): logger.log(u'Download of %s failed.' % zonefile_tmp, logger.ERROR) return @@ -233,7 +231,7 @@ def _update_zoneinfo(): if None is not current_file: remove_file_perm(zonefile) # rename downloaded file - ek.ek(os.rename, zonefile_tmp, zonefile) + os.rename(zonefile_tmp, zonefile) setattr(zoneinfo, '_CLASS_ZONE_INSTANCE', list()) tz.gettz.cache_clear() from dateutil.zoneinfo import get_zonefile_instance @@ -612,7 +610,6 @@ def get_episode_time(d, # type: int return SGDatetime.from_timestamp(ep_timestamp, tzinfo=tzinfo, tz_aware=True, local_time=False) except OverflowError: logger.debug('Invalid timestamp: %s, using fallback' % ep_timestamp) - ep_timestamp = None ep_time = None if isinstance(ep_airtime, integer_types): diff --git a/sickgear/notifiers/__init__.py b/sickgear/notifiers/__init__.py index 342e18e8..1b56c4f5 100644 --- a/sickgear/notifiers/__init__.py +++ b/sickgear/notifiers/__init__.py @@ -24,8 +24,6 @@ from . 
import emby, kodi, plex, xbmc, \ discord, emailnotify, gitter, libnotify, growl, prowl, slack, telegram, trakt import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek from _23 import filter_iter, list_values @@ -159,7 +157,7 @@ def notify_update_library(ep_obj, flush_q=False): continue shows.add(show_name) else: - parent_dir = re.sub(r'[/\\]+%s.*' % show_name, '', ek.ek(os.path.dirname, location)) + parent_dir = re.sub(r'[/\\]+%s.*' % show_name, '', os.path.dirname(location)) parent_dir = re.sub(r'^(.{,2})[/\\]', '', parent_dir) if parent_dir in locations: continue diff --git a/sickgear/notifiers/plex.py b/sickgear/notifiers/plex.py index f9ad58cf..5eaf646c 100644 --- a/sickgear/notifiers/plex.py +++ b/sickgear/notifiers/plex.py @@ -18,7 +18,6 @@ import re from .generic import Notifier import sickgear -from encodingKludge import fixStupidEncodings from exceptions_helper import ex from _23 import b64encodestring, decode_str, etree, filter_iter, list_values, unquote_plus, urlencode @@ -73,7 +72,7 @@ class PLEXNotifier(Notifier): return True except (urllib.error.URLError, IOError) as e: - self._log_warning(u'Couldn\'t contact Plex at ' + fixStupidEncodings(url) + ' ' + ex(e)) + self._log_warning(u'Couldn\'t contact Plex at ' + url + ' ' + ex(e)) return False @staticmethod diff --git a/sickgear/notifiers/pytivo.py b/sickgear/notifiers/pytivo.py index 9a512d3d..b05dddaa 100644 --- a/sickgear/notifiers/pytivo.py +++ b/sickgear/notifiers/pytivo.py @@ -18,8 +18,6 @@ import os from .generic import BaseNotifier import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex from _23 import urlencode @@ -51,7 +49,7 @@ class PyTivoNotifier(BaseNotifier): show_path = ep_obj.show_obj.location show_name = ep_obj.show_obj.name - root_show_and_season = ek.ek(os.path.dirname, ep_obj.location) + root_show_and_season = os.path.dirname(ep_obj.location) abs_path = ep_obj.location # Some show names have colons in them which are illegal in a path location, so strip them out. 
diff --git a/sickgear/notifiers/synoindex.py b/sickgear/notifiers/synoindex.py index 8e0c9fbf..6e4bd5a9 100644 --- a/sickgear/notifiers/synoindex.py +++ b/sickgear/notifiers/synoindex.py @@ -18,8 +18,6 @@ import os from .generic import BaseNotifier -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex from sg_helpers import cmdline_runner @@ -35,7 +33,7 @@ class SynoIndexNotifier(BaseNotifier): def _cmdline_run(self, synoindex_cmd): self._log_debug(u'Executing command ' + str(synoindex_cmd)) - self._log_debug(u'Absolute path to command: ' + ek.ek(os.path.abspath, synoindex_cmd[0])) + self._log_debug(u'Absolute path to command: ' + os.path.abspath(synoindex_cmd[0])) try: output, err, exit_status = cmdline_runner(synoindex_cmd) self._log_debug(u'Script result: %s' % output) @@ -44,8 +42,7 @@ class SynoIndexNotifier(BaseNotifier): def _move_object(self, old_path, new_path): if self.is_enabled(): - self._cmdline_run(['/usr/syno/bin/synoindex', '-N', ek.ek(os.path.abspath, new_path), - ek.ek(os.path.abspath, old_path)]) + self._cmdline_run(['/usr/syno/bin/synoindex', '-N', os.path.abspath(new_path), os.path.abspath(old_path)]) def deleteFolder(self, cur_path): self._make_object('-D', cur_path) @@ -61,7 +58,7 @@ class SynoIndexNotifier(BaseNotifier): def _make_object(self, cmd_arg, cur_path): if self.is_enabled(): - self._cmdline_run(['/usr/syno/bin/synoindex', cmd_arg, ek.ek(os.path.abspath, cur_path)]) + self._cmdline_run(['/usr/syno/bin/synoindex', cmd_arg, os.path.abspath(cur_path)]) def update_library(self, ep_obj=None, **kwargs): self.addFile(ep_obj.location) diff --git a/sickgear/notifiers/synologynotifier.py b/sickgear/notifiers/synologynotifier.py index bc6a6d9b..1aacb2f8 100644 --- a/sickgear/notifiers/synologynotifier.py +++ b/sickgear/notifiers/synologynotifier.py @@ -18,8 +18,6 @@ import os from .generic import Notifier -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex from sg_helpers import cmdline_runner @@ -30,7 +28,7 @@ class SynologyNotifier(Notifier): synodsmnotify_cmd = ['/usr/syno/bin/synodsmnotify', '@administrators', title, body] self._log(u'Executing command ' + str(synodsmnotify_cmd)) - self._log_debug(u'Absolute path to command: ' + ek.ek(os.path.abspath, synodsmnotify_cmd[0])) + self._log_debug(u'Absolute path to command: ' + os.path.abspath(synodsmnotify_cmd[0])) try: output, err, exit_status = cmdline_runner(synodsmnotify_cmd) self._log_debug(u'Script result: %s' % output) diff --git a/sickgear/notifiers/telegram.py b/sickgear/notifiers/telegram.py index 6cd31518..f528864e 100644 --- a/sickgear/notifiers/telegram.py +++ b/sickgear/notifiers/telegram.py @@ -22,8 +22,6 @@ import re from ..common import USER_AGENT from .generic import Notifier -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex import sickgear from sickgear.image_cache import ImageCache @@ -51,11 +49,11 @@ class TelegramNotifier(Notifier): msg = re.sub('(?i) ?', ' ', msg) if use_icon: - image_path = ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', 'slick', 'images', 'banner_thumb.jpg') + image_path = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', 'banner_thumb.jpg') if not self._testing: show_obj = ep_obj.show_obj banner_path = ImageCache().banner_thumb_path(show_obj.tvid, show_obj.prodid) - if ek.ek(os.path.isfile, banner_path): + if os.path.isfile(banner_path): image_path = banner_path with open(image_path, 'rb') as f: diff --git a/sickgear/notifiers/xbmc.py 
b/sickgear/notifiers/xbmc.py index 3b18c51c..71b24718 100644 --- a/sickgear/notifiers/xbmc.py +++ b/sickgear/notifiers/xbmc.py @@ -20,7 +20,6 @@ import time from .generic import Notifier import sickgear from exceptions_helper import ex -from encodingKludge import fixStupidEncodings from json_helper import json_dumps, json_load from _23 import b64encodestring, decode_str, etree, quote, unquote, unquote_plus, urlencode @@ -163,9 +162,9 @@ class XBMCNotifier(Notifier): # if we have a password, use authentication if password: req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) - self._log_debug(u'Contacting (with auth header) via url: ' + fixStupidEncodings(url)) + self._log_debug(u'Contacting (with auth header) via url: ' + url) else: - self._log_debug(u'Contacting via url: ' + fixStupidEncodings(url)) + self._log_debug(u'Contacting via url: ' + url) http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager result = decode_str(http_response_obj.read(), sickgear.SYS_ENCODING) @@ -175,7 +174,7 @@ class XBMCNotifier(Notifier): return result except (urllib.error.URLError, IOError) as e: - self._log_warning(u'Couldn\'t contact HTTP at %s %s' % (fixStupidEncodings(url), ex(e))) + self._log_warning(u'Couldn\'t contact HTTP at %s %s' % (url, ex(e))) return False def _update_library_http(self, host=None, show_name=None): @@ -303,9 +302,9 @@ class XBMCNotifier(Notifier): # if we have a password, use authentication if password: req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) - self._log_debug(u'Contacting (with auth header) via url: ' + fixStupidEncodings(url)) + self._log_debug(u'Contacting (with auth header) via url: ' + url) else: - self._log_debug(u'Contacting via url: ' + fixStupidEncodings(url)) + self._log_debug(u'Contacting via url: ' + url) try: http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager @@ -324,7 +323,7 @@ class XBMCNotifier(Notifier): return False except IOError as e: - self._log_warning(u'Couldn\'t contact JSON API at ' + fixStupidEncodings(url) + ' ' + ex(e)) + self._log_warning(u'Couldn\'t contact JSON API at ' + url + ' ' + ex(e)) return False def _update_library_json(self, host=None, show_name=None): diff --git a/sickgear/nzbSplitter.py b/sickgear/nzbSplitter.py index d1447e8b..da179d7c 100644 --- a/sickgear/nzbSplitter.py +++ b/sickgear/nzbSplitter.py @@ -21,8 +21,6 @@ import re from lxml_etree import etree -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex import sickgear @@ -112,9 +110,9 @@ def getSeasonNZBs(name, url_data, season): cur_ep = match.group(1) fn = name_extractor(cur_file.get('subject', '')) if cur_ep == re.sub(r'\+\d+\.par2$', '', fn, flags=re.I): - bn, ext = ek.ek(os.path.splitext, fn) + bn, ext = os.path.splitext(fn) cur_ep = re.sub(r'\.(part\d+|vol\d+(\+\d+)?)$', '', bn, flags=re.I) - bn, ext = ek.ek(os.path.splitext, cur_ep) + bn, ext = os.path.splitext(cur_ep) if isinstance(ext, string_types) \ and re.search(r'^\.(nzb|r\d{2}|rar|7z|zip|par2|vol\d+|nfo|srt|txt|bat|sh|mkv|mp4|avi|wmv)$', ext, flags=re.I): @@ -155,7 +153,7 @@ def saveNZB(nzb_name, nzb_string): :type nzb_string: AnyStr """ try: - with ek.ek(open, nzb_name + '.nzb', 'w') as nzb_fh: + with open(nzb_name + '.nzb', 'w') as nzb_fh: nzb_fh.write(nzb_string) except EnvironmentError as e: diff --git a/sickgear/piper.py b/sickgear/piper.py index 6ac111e9..99ee0ee3 100644 --- 
a/sickgear/piper.py +++ b/sickgear/piper.py @@ -1,13 +1,5 @@ import sys -# noinspection PyPep8Naming -import encodingKludge as ek - -if ek.EXIT_BAD_ENCODING: - print('Sorry, you MUST add the SickGear folder to the PYTHONPATH environment variable') - print('or find another way to force Python to use %s for string encoding.' % ek.SYS_ENCODING) - sys.exit(1) - # ################################# # Sanity check passed, can continue # ################################# @@ -32,7 +24,7 @@ def is_pip_ok(): :return: True if pip is ok """ - pip_ok = '/' != ek.ek(os.path.expanduser, '~') + pip_ok = '/' != os.path.expanduser('~') if pip_ok: pip_version, _, _ = _get_pip_version() if not pip_version: @@ -115,7 +107,7 @@ def initial_requirements(): def extras_failed_filepath(data_dir): - return ek.ek(os.path.join, data_dir, '.pip_req_spec_failed.txt') + return os.path.join(data_dir, '.pip_req_spec_failed.txt') def load_ignorables(data_dir): @@ -124,7 +116,7 @@ def load_ignorables(data_dir): data = [] filepath = extras_failed_filepath(data_dir) - if ek.ek(os.path.isfile, filepath): + if os.path.isfile(filepath): try: with io.open(filepath, 'r', encoding='UTF8') as fp: data = fp.readlines() @@ -194,7 +186,7 @@ def _check_pip_env(pip_outdated=False, reset_fails=False): from sickgear import logger, PROG_DIR, DATA_DIR for cur_reco_file in ['requirements.txt', 'recommended.txt']: try: - with io.open(ek.ek(os.path.join, PROG_DIR, cur_reco_file)) as fh: + with io.open(os.path.join(PROG_DIR, cur_reco_file)) as fh: input_reco += ['%s\n' % line.strip() for line in fh] # must ensure EOL marker except (BaseException, Exception): pass @@ -302,7 +294,7 @@ def pip_update(loading_msg, updates_todo, data_dir): failed_lines = [] input_reco = None - piper_path = ek.ek(os.path.join, data_dir, '.pip_req_spec_temp.txt') + piper_path = os.path.join(data_dir, '.pip_req_spec_temp.txt') for cur_project_name, cur_data in iteritems(updates_todo): msg = 'Installing package "%s"' % cur_project_name if cur_data.get('info'): @@ -339,7 +331,7 @@ def pip_update(loading_msg, updates_todo, data_dir): if not parsed_name: parsed_name = re.findall(r'(?sim)up-to-date\S+\s*(%s).*?\s\(([^)]+)\)$' % find_name, output) parsed_name = ['' if not parsed_name else '-'.join(parsed_name[0])] - pip_version = re.findall(r'%s-([\d.]+).*?' % find_name, ek.ek(os.path.basename, parsed_name[0]), re.I)[0] + pip_version = re.findall(r'%s-([\d.]+).*?' 
% find_name, os.path.basename(parsed_name[0]), re.I)[0] except (BaseException, Exception): pass diff --git a/sickgear/postProcessor.py b/sickgear/postProcessor.py index 5585fd3a..945f257b 100644 --- a/sickgear/postProcessor.py +++ b/sickgear/postProcessor.py @@ -22,8 +22,6 @@ import re import stat import threading -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex @@ -64,17 +62,16 @@ class PostProcessor(object): nzb_name: The name of the NZB which resulted in this file being downloaded (optional) """ # absolute path to the folder that is being processed - self.folder_path = long_path(ek.ek(os.path.dirname, long_path( - ek.ek(os.path.abspath, long_path(file_path))))) # type: AnyStr + self.folder_path = long_path(os.path.dirname(long_path(os.path.abspath(long_path(file_path))))) # type: AnyStr # full path to file self.file_path = long_path(file_path) # type: AnyStr # file name only - self.file_name = ek.ek(os.path.basename, long_path(file_path)) # type: AnyStr + self.file_name = os.path.basename(long_path(file_path)) # type: AnyStr # the name of the folder only - self.folder_name = ek.ek(os.path.basename, self.folder_path) # type: AnyStr + self.folder_name = os.path.basename(self.folder_path) # type: AnyStr # name of the NZB that resulted in this folder self.nzb_name = nzb_name # type: AnyStr or None @@ -112,8 +109,8 @@ class PostProcessor(object): :param level: The log level to use (optional) :type level: int """ - logger_msg = re.sub(r'(?i)\.*', '', message) - logger_msg = re.sub('(?i)]+>([^<]+)<[/]a>', r'\1', logger_msg) + logger_msg = re.sub(r'(?i)\.*', '', message) + logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) logger.log(u'%s' % logger_msg, level) self.log += message + '\n' @@ -136,12 +133,12 @@ class PostProcessor(object): return PostProcessor.DOESNT_EXIST # if the new file exists, return the appropriate code depending on the size - if ek.ek(os.path.isfile, existing_file): + if os.path.isfile(existing_file): new_file = u'New file %s
.. is ' % self.file_path - if ek.ek(os.path.getsize, self.file_path) == ek.ek(os.path.getsize, existing_file): + if os.path.getsize(self.file_path) == os.path.getsize(existing_file): self._log(u'%sthe same size as %s' % (new_file, existing_file), logger.DEBUG) return PostProcessor.EXISTS_SAME - elif ek.ek(os.path.getsize, self.file_path) < ek.ek(os.path.getsize, existing_file): + elif os.path.getsize(self.file_path) < os.path.getsize(existing_file): self._log(u'%ssmaller than %s' % (new_file, existing_file), logger.DEBUG) return PostProcessor.EXISTS_LARGER else: @@ -188,7 +185,7 @@ class PostProcessor(object): base_name = re.sub(r'[\[\]*?]', r'[\g<0>]', base_name) for meta_ext in ['', '-thumb', '.ext', '.ext.cover', '.metathumb']: - for associated_file_path in ek.ek(glob.glob, '%s%s.*' % (base_name, meta_ext)): + for associated_file_path in glob.glob('%s%s.*' % (base_name, meta_ext)): # only add associated to list if associated_file_path == file_path: continue @@ -201,7 +198,7 @@ class PostProcessor(object): if re.search(r'(^.+\.(rar|r\d+)$)', associated_file_path): continue - if ek.ek(os.path.isfile, associated_file_path): + if os.path.isfile(associated_file_path): file_path_list.append(associated_file_path) return file_path_list @@ -230,13 +227,13 @@ class PostProcessor(object): # delete the file and any other files which we want to delete for cur_file in file_list: - if ek.ek(os.path.isfile, cur_file): + if os.path.isfile(cur_file): # check first the read-only attribute - file_attribute = ek.ek(os.stat, cur_file)[0] + file_attribute = os.stat(cur_file)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable try: - ek.ek(os.chmod, cur_file, stat.S_IWRITE) + os.chmod(cur_file, stat.S_IWRITE) self._log(u'Changed read only permissions to writeable to delete file %s' % cur_file, logger.DEBUG) except (BaseException, Exception): @@ -245,7 +242,7 @@ class PostProcessor(object): removal_type = helpers.remove_file(cur_file, log_level=logger.DEBUG) - if True is not ek.ek(os.path.isfile, cur_file): + if True is not os.path.isfile(cur_file): self._log(u'%s file %s' % (removal_type, cur_file), logger.DEBUG) # do the library update for synoindex @@ -294,7 +291,7 @@ class PostProcessor(object): # deal with all files for cur_file_path in file_list: - cur_file_name = ek.ek(os.path.basename, cur_file_path) + cur_file_name = os.path.basename(cur_file_path) # get the extension without . 
cur_extension = cur_file_path[old_base_name_length + 1:] @@ -304,10 +301,10 @@ class PostProcessor(object): cur_extension = 'nfo-orig' # check if file have subtitles language - if ek.ek(os.path.splitext, cur_extension)[1][1:] in common.subtitleExtensions: - cur_lang = ek.ek(os.path.splitext, cur_extension)[0] + if os.path.splitext(cur_extension)[1][1:] in common.subtitleExtensions: + cur_lang = os.path.splitext(cur_extension)[0] if cur_lang in sickgear.SUBTITLES_LANGUAGES: - cur_extension = cur_lang + ek.ek(os.path.splitext, cur_extension)[1] + cur_extension = cur_lang + os.path.splitext(cur_extension)[1] # If new base name then convert name if new_base_name: @@ -317,15 +314,15 @@ class PostProcessor(object): new_file_name = helpers.replace_extension(cur_file_name, cur_extension) if sickgear.SUBTITLES_DIR and cur_extension in common.subtitleExtensions: - subs_new_path = ek.ek(os.path.join, new_path, sickgear.SUBTITLES_DIR) + subs_new_path = os.path.join(new_path, sickgear.SUBTITLES_DIR) dir_exists = helpers.make_dir(subs_new_path) if not dir_exists: logger.log(u'Unable to create subtitles folder ' + subs_new_path, logger.ERROR) else: helpers.chmod_as_parent(subs_new_path) - new_file_path = ek.ek(os.path.join, subs_new_path, new_file_name) + new_file_path = os.path.join(subs_new_path, new_file_name) else: - new_file_path = ek.ek(os.path.join, new_path, new_file_name) + new_file_path = os.path.join(new_path, new_file_name) if None is action_tmpl: action(cur_file_path, new_file_path) @@ -598,7 +595,7 @@ class PostProcessor(object): and parse_result.release_group: if not self.release_name: - self.release_name = helpers.remove_extension(ek.ek(os.path.basename, parse_result.original_name)) + self.release_name = helpers.remove_extension(os.path.basename(parse_result.original_name)) else: logger.log(u'Parse result not sufficient (all following have to be set). will not save release name', @@ -824,7 +821,7 @@ class PostProcessor(object): try: script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", script_name) if piece.strip()] - script_cmd[0] = ek.ek(os.path.abspath, script_cmd[0]) + script_cmd[0] = os.path.abspath(script_cmd[0]) self._log(u'Absolute path to script: ' + script_cmd[0], logger.DEBUG) if PY2: @@ -883,7 +880,7 @@ class PostProcessor(object): """ try: - existing_show_path = ek.ek(os.path.isdir, ep_obj.show.location) + existing_show_path = os.path.isdir(ep_obj.show.location) except exceptions_helper.ShowDirNotFoundException: existing_show_path = False @@ -1062,10 +1059,10 @@ class PostProcessor(object): :rtype: bool """ - self._log(u'Processing... %s%s' % (ek.ek(os.path.relpath, self.file_path, self.folder_path), + self._log(u'Processing... %s%s' % (os.path.relpath(self.file_path, self.folder_path), (u'
.. from nzb %s' % self.nzb_name, u'')[None is self.nzb_name])) - if ek.ek(os.path.isdir, self.file_path): + if os.path.isdir(self.file_path): self._log(u'Expecting file %s
.. is actually a directory, skipping' % self.file_path) return False @@ -1110,9 +1107,9 @@ class PostProcessor(object): try: self._delete(cur_ep_obj.location, associated_files=True) - # clean up any left over folders + # clean up any leftover folders if cur_ep_obj.location: - helpers.delete_empty_folders(ek.ek(os.path.dirname, cur_ep_obj.location), + helpers.delete_empty_folders(os.path.dirname(cur_ep_obj.location), keep_dir=ep_obj.show_obj.location) except (OSError, IOError): raise exceptions_helper.PostProcessingFailed(u'Unable to delete existing files') @@ -1122,10 +1119,10 @@ class PostProcessor(object): # cur_ep_obj.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality) # if the show directory doesn't exist then make it if allowed - if not ek.ek(os.path.isdir, ep_obj.show_obj.location) and sickgear.CREATE_MISSING_SHOW_DIRS: + if not os.path.isdir(ep_obj.show_obj.location) and sickgear.CREATE_MISSING_SHOW_DIRS: self._log(u'Show directory does not exist, creating it', logger.DEBUG) try: - ek.ek(os.mkdir, ep_obj.show_obj.location) + os.mkdir(ep_obj.show_obj.location) # do the library update for synoindex notifiers.NotifierFactory().get('SYNOINDEX').addFolder(ep_obj.show_obj.location) except (OSError, IOError): @@ -1138,7 +1135,7 @@ class PostProcessor(object): self._change_ep_objs(show_obj, season_number, episode_numbers, new_ep_quality) # Just want to keep this consistent for failed handling right now - release_name = show_name_helpers.determineReleaseName(self.folder_path, self.nzb_name) + release_name = show_name_helpers.determine_release_name(self.folder_path, self.nzb_name) if None is release_name: self._log(u'No snatched release found in history', logger.WARNING) elif sickgear.USE_FAILED_DOWNLOADS: @@ -1147,8 +1144,8 @@ class PostProcessor(object): # find the destination folder try: proper_path = ep_obj.proper_path() - proper_absolute_path = ek.ek(os.path.join, ep_obj.show_obj.location, proper_path) - dest_path = ek.ek(os.path.dirname, proper_absolute_path) + proper_absolute_path = os.path.join(ep_obj.show_obj.location, proper_path) + dest_path = os.path.dirname(proper_absolute_path) except exceptions_helper.ShowDirNotFoundException: raise exceptions_helper.PostProcessingFailed( @@ -1162,7 +1159,7 @@ class PostProcessor(object): # figure out the base name of the resulting episode file if sickgear.RENAME_EPISODES: - new_base_name = ek.ek(os.path.basename, proper_path) + new_base_name = os.path.basename(proper_path) new_file_name = new_base_name + '.' 
+ self.file_name.rpartition('.')[-1] else: @@ -1224,7 +1221,7 @@ class PostProcessor(object): sql_l = [] for cur_ep_obj in [ep_obj] + ep_obj.related_ep_obj: with cur_ep_obj.lock: - cur_ep_obj.location = ek.ek(os.path.join, dest_path, new_file_name) + cur_ep_obj.location = os.path.join(dest_path, new_file_name) if dosubs: cur_ep_obj.download_subtitles(force=True) # set file modify stamp to show airdate diff --git a/sickgear/processTV.py b/sickgear/processTV.py index fa5e14da..18a7a0dc 100644 --- a/sickgear/processTV.py +++ b/sickgear/processTV.py @@ -24,8 +24,6 @@ import shutil import stat import sys -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex, MultipleShowObjectsException from json_helper import json_dumps, json_loads @@ -79,8 +77,8 @@ class ProcessTVShow(object): if None is not text: self._output.append(text) if self.webhandler: - logger_msg = re.sub(r'(?i)', '\n', text) - logger_msg = re.sub('(?i)]+>([^<]+)<[/]a>', r'\1', logger_msg) + logger_msg = re.sub(r'(?i)', '\n', text) + logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) self.webhandler('%s%s' % (logger_msg, u'\n')) def _log_helper(self, message, log_level=logger.DEBUG): @@ -91,8 +89,8 @@ class ProcessTVShow(object): :param log_level: log level :type log_level: int """ - logger_msg = re.sub(r'(?i)\.*', '', message) - logger_msg = re.sub('(?i)]+>([^<]+)<[/]a>', r'\1', logger_msg) + logger_msg = re.sub(r'(?i)\.*', '', message) + logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) logger.log(u'%s' % logger_msg, log_level) self._buffer(message) return @@ -124,7 +122,7 @@ class ProcessTVShow(object): :rtype: bool """ # check if it's a folder - if not ek.ek(os.path.isdir, folder): + if not os.path.isdir(folder): return False # make sure it isn't TV_DOWNLOAD_DIR @@ -142,7 +140,7 @@ class ProcessTVShow(object): logger.log(u'Warning: unable to delete folder: %s: %s' % (folder, ex(e)), logger.WARNING) return False - if ek.ek(os.path.isdir, folder): + if os.path.isdir(folder): logger.log(u'Warning: unable to delete folder: %s' % folder, logger.WARNING) return False @@ -164,24 +162,24 @@ class ProcessTVShow(object): # Delete all file not needed for cur_file in notwanted_files: - cur_file_path = ek.ek(os.path.join, process_path, cur_file) + cur_file_path = os.path.join(process_path, cur_file) - if not ek.ek(os.path.isfile, cur_file_path): + if not os.path.isfile(cur_file_path): continue # Prevent error when a notwantedfiles is an associated files # check first the read-only attribute - file_attribute = ek.ek(os.stat, cur_file_path)[0] + file_attribute = os.stat(cur_file_path)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable self._log_helper(u'Changing ReadOnly flag for file ' + cur_file) try: - ek.ek(os.chmod, cur_file_path, stat.S_IWRITE) + os.chmod(cur_file_path, stat.S_IWRITE) except OSError as e: self._log_helper(u'Cannot change permissions of %s: %s' % (cur_file_path, ex(e))) removal_type = helpers.remove_file(cur_file_path) - if ek.ek(os.path.isfile, cur_file_path): + if os.path.isfile(cur_file_path): result = False else: self._log_helper(u'%s file %s' % (removal_type, cur_file)) @@ -254,7 +252,7 @@ class ProcessTVShow(object): video_size = 0 for cur_video_file in videofiles: try: - cur_video_size = ek.ek(os.path.getsize, ek.ek(os.path.join, path, cur_video_file)) + cur_video_size = os.path.getsize(os.path.join(path, cur_video_file)) except (BaseException, Exception): continue @@ -263,7 +261,7 @@ class 
ProcessTVShow(object): video_pick = cur_video_file if video_pick: - vid_filename = ek.ek(os.path.splitext, video_pick)[0] + vid_filename = os.path.splitext(video_pick)[0] # check if filename is garbage, disregard it if re.search(r'^[a-zA-Z0-9]+$', vid_filename): return @@ -315,21 +313,20 @@ class ProcessTVShow(object): """ # if they passed us a real directory then assume it's the one we want - if dir_name and ek.ek(os.path.isdir, long_path(dir_name)): - dir_name = long_path(ek.ek(os.path.realpath, long_path(dir_name))) + if dir_name and os.path.isdir(long_path(dir_name)): + dir_name = long_path(os.path.realpath(long_path(dir_name))) # if the client and SickGear are not on the same machine translate the directory in a network directory - elif dir_name and sickgear.TV_DOWNLOAD_DIR and ek.ek(os.path.isdir, sickgear.TV_DOWNLOAD_DIR)\ - and ek.ek(os.path.normpath, dir_name) != ek.ek(os.path.normpath, sickgear.TV_DOWNLOAD_DIR): - dir_name = ek.ek(os.path.join, sickgear.TV_DOWNLOAD_DIR, - ek.ek(os.path.abspath, dir_name).split(os.path.sep)[-1]) + elif dir_name and sickgear.TV_DOWNLOAD_DIR and os.path.isdir(sickgear.TV_DOWNLOAD_DIR)\ + and os.path.normpath(dir_name) != os.path.normpath(sickgear.TV_DOWNLOAD_DIR): + dir_name = os.path.join(sickgear.TV_DOWNLOAD_DIR, os.path.abspath(dir_name).split(os.path.sep)[-1]) self._log_helper(u'SickGear PP Config, completed TV downloads folder: ' + sickgear.TV_DOWNLOAD_DIR) if dir_name: self._log_helper(u'Checking folder... ' + dir_name) # if we didn't find a real directory then process "failed" or just quit - if not dir_name or not ek.ek(os.path.isdir, dir_name): + if not dir_name or not os.path.isdir(dir_name): if nzb_name and failed: self._process_failed(dir_name, nzb_name, show_obj=show_obj) else: @@ -351,7 +348,7 @@ class ProcessTVShow(object): show_obj = self.check_name(re.sub(r'\.(nzb|torrent)$', '', nzb_name, flags=re.I)) if None is show_obj and dir_name: - show_obj = self.check_name(ek.ek(os.path.basename, dir_name)) + show_obj = self.check_name(os.path.basename(dir_name)) path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type) @@ -376,12 +373,12 @@ class ProcessTVShow(object): self._process_failed(dir_name, nzb_name, show_obj=show_obj) self.update_history_tab() return self.result - rar_content = [x for x in rar_content if not helpers.is_link(ek.ek(os.path.join, path, x))] + rar_content = [x for x in rar_content if not helpers.is_link(os.path.join(path, x))] path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type) - files = [x for x in files if not helpers.is_link(ek.ek(os.path.join, path, x))] + files = [x for x in files if not helpers.is_link(os.path.join(path, x))] video_files = filter_list(helpers.has_media_ext, files) video_in_rar = filter_list(helpers.has_media_ext, rar_content) - work_files += [ek.ek(os.path.join, path, item) for item in rar_content] + work_files += [os.path.join(path, item) for item in rar_content] if 0 < len(files): self._log_helper(u'Process file%s: %s' % (helpers.maybe_plural(files), str(files))) @@ -408,7 +405,7 @@ class ProcessTVShow(object): if None is show_obj: soh = self.check_video_filenames(path, video_in_rar) self._process_media(path, video_in_rar, nzb_name, 'move', force, force_replace, show_obj=soh) - self._delete_files(path, [ek.ek(os.path.relpath, item, path) for item in work_files], force=True) + self._delete_files(path, [os.path.relpath(item, path) for item in work_files], force=True) video_batch = set(video_files) - set(video_in_rar) else: video_batch = video_files @@ -418,7 
+415,7 @@ class ProcessTVShow(object): video_pick = [''] video_size = 0 for cur_video_file in video_batch: - cur_video_size = ek.ek(os.path.getsize, ek.ek(os.path.join, path, cur_video_file)) + cur_video_size = os.path.getsize(os.path.join(path, cur_video_file)) if 0 == video_size or cur_video_size > video_size: video_size = cur_video_size video_pick = [cur_video_file] @@ -439,7 +436,7 @@ class ProcessTVShow(object): # self._set_process_success(reset=True) - for walk_path, walk_dir, files in ek.ek(os.walk, ek.ek(os.path.join, path, directory), topdown=False): + for walk_path, walk_dir, files in os.walk(os.path.join(path, directory), topdown=False): if sickgear.POSTPONE_IF_SYNC_FILES and any(filter_iter(helpers.is_sync_file, files)): self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) @@ -452,17 +449,17 @@ class ProcessTVShow(object): continue # Ignore any symlinks at this stage to avoid the potential for unraring a symlinked archive - files = [x for x in files if not helpers.is_link(ek.ek(os.path.join, walk_path, x))] + files = [x for x in files if not helpers.is_link(os.path.join(walk_path, x))] rar_files, rarfile_history = self.unused_archives( walk_path, filter_list(helpers.is_first_rar_volume, files), pp_type, process_method, rarfile_history) rar_content = self._unrar(walk_path, rar_files, force) - work_files += [ek.ek(os.path.join, walk_path, item) for item in rar_content] + work_files += [os.path.join(walk_path, item) for item in rar_content] if self.fail_detected: self._process_failed(dir_name, nzb_name, show_obj=self.show_obj_helper(show_obj, directory)) continue - rar_content = [x for x in rar_content if not helpers.is_link(ek.ek(os.path.join, walk_path, x))] + rar_content = [x for x in rar_content if not helpers.is_link(os.path.join(walk_path, x))] files = list(set(files + rar_content)) video_files = filter_list(helpers.has_media_ext, files) video_in_rar = filter_list(helpers.has_media_ext, rar_content) @@ -483,7 +480,7 @@ class ProcessTVShow(object): video_pick = [''] video_size = 0 for cur_video_file in video_batch: - cur_video_size = ek.ek(os.path.getsize, ek.ek(os.path.join, walk_path, cur_video_file)) + cur_video_size = os.path.getsize(os.path.join(walk_path, cur_video_file)) if 0 == video_size or cur_video_size > video_size: video_size = cur_video_size @@ -512,14 +509,12 @@ class ProcessTVShow(object): self._delete_files(walk_path, notwanted_files) if 'move' == process_method \ - and ek.ek(os.path.normpath, sickgear.TV_DOWNLOAD_DIR) != ek.ek(os.path.normpath, - walk_path): + and os.path.normpath(sickgear.TV_DOWNLOAD_DIR) != os.path.normpath(walk_path): self._delete_folder(walk_path, check_empty=False) if 'copy' == process_method and work_files: - self._delete_files(path, [ek.ek(os.path.relpath, item, path) for item in work_files], force=True) - for f in sorted(list(set([ek.ek(os.path.dirname, item) for item in work_files]) - {path}), - key=len, reverse=True): + self._delete_files(path, [os.path.relpath(item, path) for item in work_files], force=True) + for f in sorted(list(set([os.path.dirname(item) for item in work_files]) - {path}), key=len, reverse=True): self._delete_folder(f) def _bottom_line(text, log_level=logger.DEBUG): @@ -561,7 +556,7 @@ class ProcessTVShow(object): if ('auto' == pp_type and sickgear.PROCESS_AUTOMATICALLY and 'copy' == process_method and sickgear.UNPACK): - archive_history_file = ek.ek(os.path.join, sickgear.DATA_DIR, 'archive_history.txt') + archive_history_file = os.path.join(sickgear.DATA_DIR, 
'archive_history.txt') if not archive_history: try: @@ -572,10 +567,10 @@ class ProcessTVShow(object): init_history_cnt = len(archive_history) - archive_history = {k_arc: v for k_arc, v in iteritems(archive_history) if ek.ek(os.path.isfile, k_arc)} + archive_history = {k_arc: v for k_arc, v in iteritems(archive_history) if os.path.isfile(k_arc)} - unused_files = list(set([ek.ek(os.path.join, path, x) for x in archives]) - set(iterkeys(archive_history))) - archives = [ek.ek(os.path.basename, x) for x in unused_files] + unused_files = list(set([os.path.join(path, x) for x in archives]) - set(iterkeys(archive_history))) + archives = [os.path.basename(x) for x in unused_files] if unused_files: for f in unused_files: archive_history.setdefault(f, int(timestamp_near(datetime.datetime.utcnow()))) @@ -607,18 +602,18 @@ class ProcessTVShow(object): """ self._log_helper(u'Processing sub dir: ' + dir_name) - if ek.ek(os.path.basename, dir_name).startswith('_FAILED_'): + if os.path.basename(dir_name).startswith('_FAILED_'): self._log_helper(u'The directory name indicates it failed to extract.') failed = True - elif ek.ek(os.path.basename, dir_name).startswith('_UNDERSIZED_'): + elif os.path.basename(dir_name).startswith('_UNDERSIZED_'): self._log_helper(u'The directory name indicates that it was previously rejected for being undersized.') failed = True - elif ek.ek(os.path.basename, dir_name).upper().startswith('_UNPACK'): + elif os.path.basename(dir_name).upper().startswith('_UNPACK'): self._log_helper(u'The directory name indicates that this release is in the process of being unpacked.') return False if failed: - self._process_failed(ek.ek(os.path.join, path, dir_name), nzb_name_original, show_obj=show_obj) + self._process_failed(os.path.join(path, dir_name), nzb_name_original, show_obj=show_obj) return False if helpers.is_hidden_folder(dir_name): @@ -630,8 +625,8 @@ class ProcessTVShow(object): sql_result = my_db.select('SELECT * FROM tv_shows') for cur_result in sql_result: - if dir_name.lower().startswith(ek.ek(os.path.realpath, cur_result['location']).lower() + os.sep)\ - or dir_name.lower() == ek.ek(os.path.realpath, cur_result['location']).lower(): + if dir_name.lower().startswith(os.path.realpath(cur_result['location']).lower() + os.sep) \ + or dir_name.lower() == os.path.realpath(cur_result['location']).lower(): self._log_helper( u'Found an episode that has already been moved to its show dir, skipping', logger.ERROR) @@ -641,7 +636,7 @@ class ProcessTVShow(object): all_files = [] all_dirs = [] process_path = None - for process_path, process_dir, fileList in ek.ek(os.walk, ek.ek(os.path.join, path, dir_name), topdown=False): + for process_path, process_dir, fileList in os.walk(os.path.join(path, dir_name), topdown=False): all_dirs += process_dir all_files += fileList @@ -688,7 +683,7 @@ class ProcessTVShow(object): unpacked_files = [] if 'win32' == sys.platform: - rarfile.UNRAR_TOOL = ek.ek(os.path.join, sickgear.PROG_DIR, 'lib', 'rarfile', 'UnRAR.exe') + rarfile.UNRAR_TOOL = os.path.join(sickgear.PROG_DIR, 'lib', 'rarfile', 'UnRAR.exe') if sickgear.UNPACK and rar_files: @@ -699,7 +694,7 @@ class ProcessTVShow(object): self._log_helper(u'Unpacking archive: ' + archive) try: - rar_handle = rarfile.RarFile(ek.ek(os.path.join, path, archive)) + rar_handle = rarfile.RarFile(os.path.join(path, archive)) except (BaseException, Exception): self._log_helper(u'Failed to open archive: %s' % archive, logger.ERROR) self._set_process_success(False) @@ -707,7 +702,7 @@ class ProcessTVShow(object): try: # 
Skip extraction if any file in archive has previously been extracted skip_file = False - for file_in_archive in [ek.ek(os.path.basename, x.filename) + for file_in_archive in [os.path.basename(x.filename) for x in rar_handle.infolist() if not x.is_dir()]: if self._already_postprocessed(path, file_in_archive, force): self._log_helper( @@ -721,8 +716,7 @@ class ProcessTVShow(object): raise rarfile.PasswordRequired rar_handle.extractall(path=path) - rar_content = [ek.ek(os.path.normpath, x.filename) - for x in rar_handle.infolist() if not x.is_dir()] + rar_content = [os.path.normpath(x.filename) for x in rar_handle.infolist() if not x.is_dir()] renamed = self.cleanup_names(path, rar_content) cur_unpacked = rar_content if not renamed else \ (list(set(rar_content) - set(iterkeys(renamed))) + list_values(renamed)) @@ -744,7 +738,7 @@ class ProcessTVShow(object): # check for passworded rar's for archive in rar_files: try: - rar_handle = rarfile.RarFile(ek.ek(os.path.join, path, archive)) + rar_handle = rarfile.RarFile(os.path.join(path, archive)) except (BaseException, Exception): self._log_helper(u'Failed to open archive: %s' % archive, logger.ERROR) continue @@ -773,7 +767,7 @@ class ProcessTVShow(object): old_name = None new_name = None params = { - 'base_name': ek.ek(os.path.basename, directory), + 'base_name': os.path.basename(directory), 'reverse_pattern': re.compile('|'.join([ r'\.\d{2}e\d{2}s\.', r'\.p0(?:63|27|612)\.', r'\.[pi](?:084|675|0801)\.', r'\b[45]62[xh]\.', r'\.yarulb\.', r'\.vtd[hp]\.', r'\.(?:ld[.-]?)?bew\.', r'\.pir.?(?:shv|dov|dvd|bew|db|rb)\.', @@ -797,9 +791,9 @@ class ProcessTVShow(object): for cur_filename in _filenames: - file_name, file_extension = ek.ek(os.path.splitext, cur_filename) - file_path = ek.ek(os.path.join, _dirpath, cur_filename) - dir_name = ek.ek(os.path.dirname, file_path) + file_name, file_extension = os.path.splitext(cur_filename) + file_path = os.path.join(_dirpath, cur_filename) + dir_name = os.path.dirname(file_path) if None is not reverse_pattern.search(file_name): na_parts = season_pattern.search(file_name) @@ -817,32 +811,32 @@ class ProcessTVShow(object): new_filename = file_name[::-1] logger.log('Reversing base filename "%s" to "%s"' % (file_name, new_filename)) try: - ek.ek(os.rename, file_path, ek.ek(os.path.join, _dirpath, new_filename + file_extension)) - is_renamed[ek.ek(os.path.relpath, file_path, directory)] = ek.ek( - os.path.relpath, new_filename + file_extension, directory) + os.rename(file_path, os.path.join(_dirpath, new_filename + file_extension)) + is_renamed[os.path.relpath(file_path, directory)] = \ + os.path.relpath(new_filename + file_extension, directory) except OSError as _e: logger.log('Error unable to rename file "%s" because %s' % (cur_filename, ex(_e)), logger.ERROR) elif helpers.has_media_ext(cur_filename) and \ None is not garbage_name.search(file_name) and None is not media_pattern.search(base_name): _num_videos += 1 _old_name = file_path - _new_name = ek.ek(os.path.join, dir_name, '%s%s' % (base_name, file_extension)) + _new_name = os.path.join(dir_name, '%s%s' % (base_name, file_extension)) return is_renamed, _num_videos, _old_name, _new_name if files: is_renamed, num_videos, old_name, new_name = renamer( directory, files, num_videos, old_name, new_name, **params) else: - for cur_dirpath, void, cur_filenames in ek.ek(os.walk, directory): + for cur_dirpath, void, cur_filenames in os.walk(directory): is_renamed, num_videos, old_name, new_name = renamer( cur_dirpath, cur_filenames, num_videos, old_name, new_name, 
**params) if all([not is_renamed, 1 == num_videos, old_name, new_name]): - try_name = ek.ek(os.path.basename, new_name) - logger.log('Renaming file "%s" using dirname as "%s"' % (ek.ek(os.path.basename, old_name), try_name)) + try_name = os.path.basename(new_name) + logger.log('Renaming file "%s" using dirname as "%s"' % (os.path.basename(old_name), try_name)) try: - ek.ek(os.rename, old_name, new_name) - is_renamed[ek.ek(os.path.relpath, old_name, directory)] = ek.ek(os.path.relpath, new_name, directory) + os.rename(old_name, new_name) + is_renamed[os.path.relpath(old_name, directory)] = os.path.relpath(new_name, directory) except OSError as e: logger.log('Error unable to rename file "%s" because %s' % (old_name, ex(e)), logger.ERROR) @@ -859,11 +853,11 @@ class ProcessTVShow(object): result = False chunks = {} matcher = re.compile(r'\.[0-9]+$') - for dirpath, void, filenames in ek.ek(os.walk, directory): + for dirpath, void, filenames in os.walk(directory): for filename in filenames: if None is not matcher.search(filename): - maybe_chunk = ek.ek(os.path.join, dirpath, filename) - base_filepath, ext = ek.ek(os.path.splitext, maybe_chunk) + maybe_chunk = os.path.join(dirpath, filename) + base_filepath, ext = os.path.splitext(maybe_chunk) if base_filepath not in chunks: chunks[base_filepath] = [] chunks[base_filepath].append(maybe_chunk) @@ -874,22 +868,22 @@ class ProcessTVShow(object): for base_filepath in chunks: chunks[base_filepath].sort() chunk_set = chunks[base_filepath] - if ek.ek(os.path.isfile, base_filepath): - base_filesize = ek.ek(os.path.getsize, base_filepath) - chunk_sizes = [ek.ek(os.path.getsize, x) for x in chunk_set] + if os.path.isfile(base_filepath): + base_filesize = os.path.getsize(base_filepath) + chunk_sizes = [os.path.getsize(x) for x in chunk_set] largest_chunk = max(chunk_sizes) if largest_chunk >= base_filesize: outfile = '%s.001' % base_filepath if outfile not in chunk_set: try: - ek.ek(os.rename, base_filepath, outfile) + os.rename(base_filepath, outfile) except OSError: logger.log('Error unable to rename file %s' % base_filepath, logger.ERROR) return result chunk_set.append(outfile) chunk_set.sort() else: - del_dir, del_file = ek.ek(os.path.split, base_filepath) + del_dir, del_file = os.path.split(base_filepath) if not self._delete_files(del_dir, [del_file], force=True): return result else: @@ -1048,7 +1042,7 @@ class ProcessTVShow(object): self._set_process_success(False) continue - cur_video_file_path = ek.ek(os.path.join, process_path, cur_video_file) + cur_video_file_path = os.path.join(process_path, cur_video_file) parent = self.find_parent(cur_video_file_path) if parent: @@ -1097,16 +1091,16 @@ class ProcessTVShow(object): if dir_name == sickgear.TV_DOWNLOAD_DIR and not nzb_name or 'manual' == pp_type: # Scheduled Media Process Active # Get at first all the subdir in the dir_name - for path, dirs, files in ek.ek(os.walk, dir_name): - files = [x for x in files if not helpers.is_link(ek.ek(os.path.join, path, x))] + for path, dirs, files in os.walk(dir_name): + files = [x for x in files if not helpers.is_link(os.path.join(path, x))] break else: - path, dirs = ek.ek(os.path.split, dir_name) # Script Media Process + path, dirs = os.path.split(dir_name) # Script Media Process if None is not nzb_name and not nzb_name.endswith('.nzb') and \ - ek.ek(os.path.isfile, ek.ek(os.path.join, dir_name, nzb_name)): + os.path.isfile(os.path.join(dir_name, nzb_name)): # For single torrent file without directory dirs = [] - files = [ek.ek(os.path.join, dir_name, 
nzb_name)] + files = [os.path.join(dir_name, nzb_name)] else: dirs = [dirs] files = [] diff --git a/sickgear/properFinder.py b/sickgear/properFinder.py index b27f9c78..9e26b98c 100644 --- a/sickgear/properFinder.py +++ b/sickgear/properFinder.py @@ -21,8 +21,6 @@ import re import threading import traceback -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex, MultipleShowObjectsException, AuthException import sickgear @@ -466,7 +464,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime for hitem in history_results: # if the result exists in history already we need to skip it if clean_proper_name == _generic_name(helpers.remove_non_release_groups( - ek.ek(os.path.basename, hitem['resource']))): + os.path.basename(hitem['resource']))): is_same = True break if is_same: diff --git a/sickgear/providers/filesharingtalk.py b/sickgear/providers/filesharingtalk.py index 9890f9ac..e97a69ec 100644 --- a/sickgear/providers/filesharingtalk.py +++ b/sickgear/providers/filesharingtalk.py @@ -311,7 +311,7 @@ class FSTProvider(generic.NZBProvider): :return: list of search strings :rtype: List[AnyStr] """ - return [x for x in show_name_helpers.makeSceneSeasonSearchString(self.show_obj, ep_obj)] + return [x for x in show_name_helpers.make_scene_season_search_string(self.show_obj, ep_obj)] def _episode_strings(self, ep_obj): """ @@ -321,7 +321,7 @@ class FSTProvider(generic.NZBProvider): :return: list of search strings :rtype: List[AnyStr] """ - return [x for x in show_name_helpers.makeSceneSearchString(self.show_obj, ep_obj)] + return [x for x in show_name_helpers.make_scene_search_string(self.show_obj, ep_obj)] @staticmethod def ui_string(key=None): diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py index 43da77f6..aea3e471 100644 --- a/sickgear/providers/generic.py +++ b/sickgear/providers/generic.py @@ -29,9 +29,7 @@ import threading import socket import zlib -# noinspection PyPep8Naming -import encodingKludge as ek -from exceptions_helper import SickBeardException, AuthException, ex +from exceptions_helper import SickGearException, AuthException, ex import sickgear from .. 
import classes, db, helpers, logger, tvcache @@ -60,7 +58,7 @@ if False: from typing import Any, AnyStr, Callable, Dict, List, Match, Optional, Tuple, Union -class HaltParseException(SickBeardException): +class HaltParseException(SickGearException): """Something requires the current processing to abort""" @@ -653,8 +651,7 @@ class GenericProvider(object): :return: """ for name in ['%s.%s' % (self.get_id(), image_ext) for image_ext in ['png', 'gif', 'jpg']]: - if ek.ek(os.path.isfile, - ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', sickgear.GUI_NAME, 'images', 'providers', name)): + if os.path.isfile(os.path.join(sickgear.PROG_DIR, 'gui', sickgear.GUI_NAME, 'images', 'providers', name)): return name return '%s.png' % ('newznab', default_name[0])[any(default_name)] @@ -838,11 +835,11 @@ class GenericProvider(object): cache_dir = sickgear.CACHE_DIR or helpers.get_system_temp_dir() base_name = '%s.%s' % (re.sub('.%s$' % self.providerType, '', helpers.sanitize_filename(result.name)), self.providerType) - final_file = ek.ek(os.path.join, final_dir, base_name) + final_file = os.path.join(final_dir, base_name) cached = result.cache_filepath - if cached and ek.ek(os.path.isfile, cached): - base_name = ek.ek(os.path.basename, cached) - cache_file = ek.ek(os.path.join, cache_dir, base_name) + if cached and os.path.isfile(cached): + base_name = os.path.basename(cached) + cache_file = os.path.join(cache_dir, base_name) self.session.headers['Referer'] = url if cached or helpers.download_file(url, cache_file, session=self.session, allow_redirects='/it' not in url, @@ -870,7 +867,7 @@ class GenericProvider(object): if not saved and 'magnet' == link_type: logger.log(u'All torrent cache servers failed to return a downloadable result', logger.DEBUG) - final_file = ek.ek(os.path.join, final_dir, '%s.%s' % (helpers.sanitize_filename(result.name), link_type)) + final_file = os.path.join(final_dir, '%s.%s' % (helpers.sanitize_filename(result.name), link_type)) try: with open(final_file, 'wb') as fp: fp.write(decode_bytes(result.url)) @@ -1880,7 +1877,7 @@ class TorrentProvider(GenericProvider): seen_attr = 'PROVIDER_SEEN' if obf and self.__module__ not in getattr(sickgear, seen_attr, []): file_path = '%s.py' % os.path.join(sickgear.PROG_DIR, *self.__module__.split('.')) - if ek.ek(os.path.isfile, file_path): + if os.path.isfile(file_path): with open(file_path, 'rb') as file_hd: c = bytearray(codecs.encode(decode_bytes(str(zlib.crc32(file_hd.read()))), 'hex_codec')) @@ -1996,7 +1993,7 @@ class TorrentProvider(GenericProvider): if 2012691328 == s + zlib.crc32(decode_bytes(('.%s' % parsed.netloc).split('.')[-2])): is_valid = False file_name = '%s.py' % os.path.join(sickgear.PROG_DIR, *self.__module__.split('.')) - if ek.ek(os.path.isfile, file_name): + if os.path.isfile(file_name): with open(file_name, 'rb') as file_hd: is_valid = s + zlib.crc32(file_hd.read()) in (1661931498, 472149389) return is_valid diff --git a/sickgear/providers/omgwtfnzbs.py b/sickgear/providers/omgwtfnzbs.py index ac2bf7e9..054dfad9 100644 --- a/sickgear/providers/omgwtfnzbs.py +++ b/sickgear/providers/omgwtfnzbs.py @@ -109,7 +109,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider): :return: list of search strings :rtype: List[AnyStr] """ - return [x for x in show_name_helpers.makeSceneSeasonSearchString(self.show_obj, ep_obj)] + return [x for x in show_name_helpers.make_scene_season_search_string(self.show_obj, ep_obj)] def _episode_strings(self, ep_obj): """ @@ -119,7 +119,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider): :return: list 
of search strings :rtype: List[AnyStr] """ - return [x for x in show_name_helpers.makeSceneSearchString(self.show_obj, ep_obj)] + return [x for x in show_name_helpers.make_scene_search_string(self.show_obj, ep_obj)] def _title_and_url(self, item): """ diff --git a/sickgear/providers/tokyotoshokan.py b/sickgear/providers/tokyotoshokan.py index 8396fd9e..39592d61 100644 --- a/sickgear/providers/tokyotoshokan.py +++ b/sickgear/providers/tokyotoshokan.py @@ -98,10 +98,10 @@ class TokyoToshokanProvider(generic.TorrentProvider): return results def _season_strings(self, *args, **kwargs): - return [{'Season': show_name_helpers.makeSceneSeasonSearchString(self.show_obj, *args)}] + return [{'Season': show_name_helpers.make_scene_season_search_string(self.show_obj, *args)}] def _episode_strings(self, *args, **kwargs): - return [{'Episode': show_name_helpers.makeSceneSearchString(self.show_obj, *args)}] + return [{'Episode': show_name_helpers.make_scene_search_string(self.show_obj, *args)}] class TokyoToshokanCache(tvcache.TVCache): diff --git a/sickgear/scene_exceptions.py b/sickgear/scene_exceptions.py index cb8b47eb..a9fa0afa 100644 --- a/sickgear/scene_exceptions.py +++ b/sickgear/scene_exceptions.py @@ -25,8 +25,6 @@ import threading import traceback import sickgear -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex from json_helper import json_load from . import db, helpers, logger, name_cache @@ -387,22 +385,22 @@ def _custom_exceptions_fetcher(): src_id = 'GHSG' logger.log(u'Checking to update custom alternatives from %s' % src_id) - dirpath = ek.ek(os.path.join, sickgear.CACHE_DIR, 'alts') - tmppath = ek.ek(os.path.join, dirpath, 'tmp') - file_rar = ek.ek(os.path.join, tmppath, 'alt.rar') - file_cache = ek.ek(os.path.join, dirpath, 'alt.json') + dirpath = os.path.join(sickgear.CACHE_DIR, 'alts') + tmppath = os.path.join(dirpath, 'tmp') + file_rar = os.path.join(tmppath, 'alt.rar') + file_cache = os.path.join(dirpath, 'alt.json') iv = 30 * 60 # min interval to fetch updates refresh = should_refresh(src_id, iv) - fetch_data = not ek.ek(os.path.isfile, file_cache) or (not int(os.environ.get('NO_ALT_GET', 0)) and refresh) + fetch_data = not os.path.isfile(file_cache) or (not int(os.environ.get('NO_ALT_GET', 0)) and refresh) if fetch_data: - if ek.ek(os.path.exists, tmppath): + if os.path.exists(tmppath): helpers.remove_file(tmppath, tree=True) helpers.make_path(tmppath) helpers.download_file(r'https://github.com/SickGear/sickgear.altdata/raw/main/alt.rar', file_rar) rar_handle = None if 'win32' == sys.platform: - rarfile.UNRAR_TOOL = ek.ek(os.path.join, sickgear.PROG_DIR, 'lib', 'rarfile', 'UnRAR.exe') + rarfile.UNRAR_TOOL = os.path.join(sickgear.PROG_DIR, 'lib', 'rarfile', 'UnRAR.exe') try: rar_handle = rarfile.RarFile(file_rar) rar_handle.extractall(path=dirpath, pwd='sickgear_alt') @@ -418,7 +416,7 @@ def _custom_exceptions_fetcher(): if refresh: set_last_refresh(src_id) - if not fetch_data and not ek.ek(os.path.isfile, file_cache): + if not fetch_data and not os.path.isfile(file_cache): logger.debug(u'Unable to fetch custom exceptions, skipped: %s' % file_rar) return custom_exception_dict, cnt_updated_numbers, should_refresh(src_id, iv, remaining=True) @@ -516,7 +514,7 @@ def _xem_exceptions_fetcher(): for tvid in [i for i in sickgear.TVInfoAPI().sources if 'xem_origin' in sickgear.TVInfoAPI(i).config]: logger.log(u'Checking for XEM scene exception updates for %s' % sickgear.TVInfoAPI(tvid).name) - url = 
'http://thexem.info/map/allNames?origin=%s%s&seasonNumbers=1'\ + url = 'https://thexem.info/map/allNames?origin=%s%s&seasonNumbers=1'\ % (sickgear.TVInfoAPI(tvid).config['xem_origin'], ('&language=us', '')['xem' == xem_list]) parsed_json = helpers.get_url(url, parse_json=True, timeout=90) @@ -551,7 +549,7 @@ def _xem_get_ids(infosrc_name, xem_origin): """ xem_ids = [] - url = 'http://thexem.info/map/havemap?origin=%s' % xem_origin + url = 'https://thexem.info/map/havemap?origin=%s' % xem_origin task = 'Fetching show ids with%s xem scene mapping%s for origin' logger.log(u'%s %s' % (task % ('', 's'), infosrc_name)) diff --git a/sickgear/search.py b/sickgear/search.py index 7c26e8a8..ac2cde64 100644 --- a/sickgear/search.py +++ b/sickgear/search.py @@ -22,8 +22,6 @@ import re import threading import traceback -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex from sg_helpers import write_file @@ -65,7 +63,7 @@ def _download_result(result): elif 'nzbdata' == result.resultType: # get the final file path to the nzb - file_name = ek.ek(os.path.join, sickgear.NZB_DIR, u'%s.nzb' % result.name) + file_name = os.path.join(sickgear.NZB_DIR, u'%s.nzb' % result.name) logger.log(u'Saving NZB to %s' % file_name) @@ -768,7 +766,7 @@ def cache_torrent_file( ): # type: (...) -> Optional[TorrentSearchResult] - cache_file = ek.ek(os.path.join, sickgear.CACHE_DIR or helpers.get_system_temp_dir(), + cache_file = os.path.join(sickgear.CACHE_DIR or helpers.get_system_temp_dir(), '%s.torrent' % (helpers.sanitize_filename(search_result.name))) if not helpers.download_file( diff --git a/sickgear/show_name_helpers.py b/sickgear/show_name_helpers.py index b1397df1..a18e5878 100644 --- a/sickgear/show_name_helpers.py +++ b/sickgear/show_name_helpers.py @@ -19,8 +19,6 @@ import os import copy import re -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex import sickgear @@ -233,19 +231,21 @@ def get_show_names_all_possible(show_obj, season=-1, scenify=True, spacer='.', f :param season: season :param scenify: :param spacer: spacer + :param force_anime: :return: """ - show_names = list(set(allPossibleShowNames(show_obj, season=season, force_anime=force_anime))) # type: List[AnyStr] + show_names = list(set( + all_possible_show_names(show_obj, season=season, force_anime=force_anime))) # type: List[AnyStr] if scenify: show_names = map_list(sanitize_scene_name, show_names) return url_encode(show_names, spacer) -def makeSceneSeasonSearchString(show_obj, # type: sickgear.tv.TVShow - ep_obj, # type: sickgear.tv.TVEpisode - ignore_allowlist=False, # type: bool - extra_search_type=None - ): # type: (...) -> List[AnyStr] +def make_scene_season_search_string(show_obj, # type: sickgear.tv.TVShow + ep_obj, # type: sickgear.tv.TVEpisode + ignore_allowlist=False, # type: bool + extra_search_type=None + ): # type: (...) 
-> List[AnyStr] """ :param show_obj: show object @@ -258,34 +258,34 @@ def makeSceneSeasonSearchString(show_obj, # type: sickgear.tv.TVShow numseasons = 0 # the search string for air by date shows is just - seasonStrings = [str(ep_obj.airdate).split('-')[0]] + season_strings = [str(ep_obj.airdate).split('-')[0]] elif show_obj.is_anime: numseasons = 0 ep_obj_list = show_obj.get_all_episodes(ep_obj.season) # get show qualities - anyQualities, bestQualities = common.Quality.splitQuality(show_obj.quality) + any_qualities, best_qualities = common.Quality.splitQuality(show_obj.quality) # compile a list of all the episode numbers we need in this 'season' - seasonStrings = [] + season_strings = [] for episode in ep_obj_list: # get quality of the episode - curCompositeStatus = episode.status - curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus) + cur_composite_status = episode.status + cur_status, cur_quality = common.Quality.splitCompositeStatus(cur_composite_status) - if bestQualities: - highestBestQuality = max(bestQualities) + if best_qualities: + highest_best_quality = max(best_qualities) else: - highestBestQuality = 0 + highest_best_quality = 0 # if we need a better one then add it to the list of episodes to fetch - if (curStatus in ( + if (cur_status in ( common.DOWNLOADED, - common.SNATCHED) and curQuality < highestBestQuality) or curStatus == common.WANTED: + common.SNATCHED) and cur_quality < highest_best_quality) or cur_status == common.WANTED: ab_number = episode.scene_absolute_number if 0 < ab_number: - seasonStrings.append("%02d" % ab_number) + season_strings.append("%02d" % ab_number) else: my_db = db.DBConnection() @@ -297,7 +297,7 @@ def makeSceneSeasonSearchString(show_obj, # type: sickgear.tv.TVShow [show_obj.tvid, show_obj.prodid]) numseasons = int(sql_result[0][0]) - seasonStrings = ["S%02d" % int(ep_obj.scene_season)] + season_strings = ["S%02d" % int(ep_obj.scene_season)] show_names = get_show_names_all_possible(show_obj, ep_obj.scene_season) @@ -312,7 +312,7 @@ def makeSceneSeasonSearchString(show_obj, # type: sickgear.tv.TVShow to_return.append(cur_name) # for providers that don't allow multiple searches in one request we only search for Sxx style stuff else: - for cur_season in seasonStrings: + for cur_season in season_strings: if not ignore_allowlist and show_obj.is_anime \ and None is not show_obj.release_groups and show_obj.release_groups.allowlist: for keyword in show_obj.release_groups.allowlist: @@ -324,10 +324,10 @@ def makeSceneSeasonSearchString(show_obj, # type: sickgear.tv.TVShow return to_return -def makeSceneSearchString(show_obj, # type: sickgear.tv.TVShow - ep_obj, # type: sickgear.tv.TVEpisode - ignore_allowlist=False # type: bool - ): # type: (...) -> List[AnyStr] +def make_scene_search_string(show_obj, # type: sickgear.tv.TVShow + ep_obj, # type: sickgear.tv.TVEpisode + ignore_allowlist=False # type: bool + ): # type: (...) -> List[AnyStr] """ :param show_obj: show object @@ -374,7 +374,7 @@ def makeSceneSearchString(show_obj, # type: sickgear.tv.TVShow return to_return -def allPossibleShowNames(show_obj, season=-1, force_anime=False): +def all_possible_show_names(show_obj, season=-1, force_anime=False): # type: (sickgear.tv.TVShow, int, bool) -> List[AnyStr] """ Figures out every possible variation of the name for a particular show. 
Includes TVDB name, TVRage name, @@ -382,45 +382,48 @@ def allPossibleShowNames(show_obj, season=-1, force_anime=False): :param show_obj: a TVShow object that we should get the names of :param season: season + :param force_anime: :return: a list of all the possible show names """ - showNames = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:] - if not showNames: # if we dont have any season specific exceptions fallback to generic exceptions + show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:] + if not show_names: # if we dont have any season specific exceptions fallback to generic exceptions season = -1 - showNames = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:] + show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:] if -1 == season: - showNames.append(show_obj.name) + show_names.append(show_obj.name) if not show_obj.is_anime and not force_anime: - newShowNames = [] + new_show_names = [] country_list = common.countryList country_list.update(dict(zip(itervalues(common.countryList), iterkeys(common.countryList)))) - for curName in set(showNames): - if not curName: + for cur_name in set(show_names): + if not cur_name: continue # if we have "Show Name Australia" or "Show Name (Australia)" this will add "Show Name (AU)" for # any countries defined in common.countryList # (and vice versa) - for curCountry in country_list: - if curName.endswith(' ' + curCountry): - newShowNames.append(curName.replace(' ' + curCountry, ' (' + country_list[curCountry] + ')')) - elif curName.endswith(' (' + curCountry + ')'): - newShowNames.append(curName.replace(' (' + curCountry + ')', ' (' + country_list[curCountry] + ')')) + for cur_country in country_list: + if cur_name.endswith(' ' + cur_country): + new_show_names.append(cur_name.replace(' ' + cur_country, + ' (' + country_list[cur_country] + ')')) + elif cur_name.endswith(' (' + cur_country + ')'): + new_show_names.append(cur_name.replace(' (' + cur_country + ')', + ' (' + country_list[cur_country] + ')')) # if we have "Show Name (2013)" this will strip the (2013) show year from the show name # newShowNames.append(re.sub('\(\d{4}\)','',curName)) - showNames += newShowNames + show_names += new_show_names - return showNames + return show_names -def determineReleaseName(dir_name=None, nzb_name=None): +def determine_release_name(dir_name=None, nzb_name=None): # type: (AnyStr, AnyStr) -> Union[AnyStr, None] - """Determine a release name from an nzb and/or folder name + """Determine a release name from a nzb and/or folder name :param dir_name: dir name :param nzb_name: nzb name :return: None or release name @@ -430,7 +433,7 @@ def determineReleaseName(dir_name=None, nzb_name=None): logger.log(u'Using nzb name for release name.') return nzb_name.rpartition('.')[0] - if not dir_name or not ek.ek(os.path.isdir, dir_name): + if not dir_name or not os.path.isdir(dir_name): return None # try to get the release name from nzb/nfo @@ -447,7 +450,7 @@ def determineReleaseName(dir_name=None, nzb_name=None): return found_file.rpartition('.')[0] # If that fails, we try the folder - folder = ek.ek(os.path.basename, dir_name) + folder = os.path.basename(dir_name) if pass_wordlist_checks(folder): # NOTE: Multiple failed downloads will change the folder name. 
# (e.g., appending #s) diff --git a/sickgear/show_queue.py b/sickgear/show_queue.py index 0bdacfdf..1f451fec 100644 --- a/sickgear/show_queue.py +++ b/sickgear/show_queue.py @@ -23,8 +23,6 @@ import traceback from lib.dateutil.parser import parser from lib.tvinfo_base.exceptions import * -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex @@ -49,7 +47,7 @@ if False: from lib.tvinfo_base import TVInfoShow from .tv import TVEpisode -# Define special priority of tv source switch tasks, higher then anything else except newly added shows +# Define special priority of tv source switch tasks, higher than anything else except newly added shows SWITCH_PRIO = generic_queue.QueuePriorities.HIGH + 5 DAILY_SHOW_UPDATE_FINISHED_EVENT = 1 @@ -72,7 +70,7 @@ class ShowQueue(generic_queue.GenericQueue): def check_events(self): if self.daily_update_running and \ - not (self.isShowUpdateRunning() or sickgear.show_update_scheduler.action.amActive): + not (self.is_show_update_running() or sickgear.show_update_scheduler.action.amActive): self.execute_events(DAILY_SHOW_UPDATE_FINISHED_EVENT) self.daily_update_running = False @@ -91,24 +89,24 @@ class ShowQueue(generic_queue.GenericQueue): continue if cur_row['action_id'] in (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE, - ShowQueueActions.WEBFORCEUPDATE): - self.updateShow(add_to_db=False, force=bool(cur_row['force']), - pausestatus_after=bool_none(cur_row['pausestatus_after']), - scheduled_update=bool(cur_row['scheduled_update']), - show_obj=show_obj, skip_refresh=bool(cur_row['skip_refresh']), - uid=cur_row['uid'], - web=ShowQueueActions.WEBFORCEUPDATE == cur_row['action_id']) + ShowQueueActions.WEBFORCEUPDATE): + self.update_show(add_to_db=False, force=bool(cur_row['force']), + pausestatus_after=bool_none(cur_row['pausestatus_after']), + scheduled_update=bool(cur_row['scheduled_update']), + show_obj=show_obj, skip_refresh=bool(cur_row['skip_refresh']), + uid=cur_row['uid'], + web=ShowQueueActions.WEBFORCEUPDATE == cur_row['action_id']) elif ShowQueueActions.REFRESH == cur_row['action_id']: - self.refreshShow(add_to_db=False, force=bool(cur_row['force']), - force_image_cache=bool(cur_row['force_image_cache']), - priority=cur_row['priority'], - scheduled_update=bool(cur_row['scheduled_update']), - show_obj=show_obj, - uid=cur_row['uid']) + self.refresh_show(add_to_db=False, force=bool(cur_row['force']), + force_image_cache=bool(cur_row['force_image_cache']), + priority=cur_row['priority'], + scheduled_update=bool(cur_row['scheduled_update']), + show_obj=show_obj, + uid=cur_row['uid']) elif ShowQueueActions.RENAME == cur_row['action_id']: - self.renameShowEpisodes(add_to_db=False, show_obj=show_obj, uid=cur_row['uid']) + self.rename_show_episodes(add_to_db=False, show_obj=show_obj, uid=cur_row['uid']) elif ShowQueueActions.SUBTITLE == cur_row['action_id']: self.download_subtitles(add_to_db=False, show_obj=show_obj, uid=cur_row['uid']) @@ -243,7 +241,7 @@ class ShowQueue(generic_queue.GenericQueue): # type: (List[integer_types], bool) -> None generic_queue.GenericQueue._remove_from_queue(self, to_remove=to_remove, force=force) - def _isInQueue(self, show_obj, actions): + def _is_in_queue(self, show_obj, actions): # type: (TVShow, Tuple[integer_types, ...]) -> bool """ @@ -254,7 +252,7 @@ class ShowQueue(generic_queue.GenericQueue): with self.lock: return any(1 for x in self.queue if x.action_id in actions and show_obj == x.show_obj) - def _isBeingSomethinged(self, show_obj, actions): + def 
_is_being_somethinged(self, show_obj, actions): # type: (TVShow, Tuple[integer_types, ...]) -> bool """ @@ -269,7 +267,7 @@ class ShowQueue(generic_queue.GenericQueue): and show_obj == self.currentItem.show_obj \ and self.currentItem.action_id in actions - def isInUpdateQueue(self, show_obj): + def is_in_update_queue(self, show_obj): # type: (TVShow) -> bool """ @@ -278,10 +276,10 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isInQueue(show_obj, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE, - ShowQueueActions.WEBFORCEUPDATE)) + return self._is_in_queue(show_obj, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE, + ShowQueueActions.WEBFORCEUPDATE)) - def isInRefreshQueue(self, show_obj): + def is_in_refresh_queue(self, show_obj): # type: (TVShow) -> bool """ @@ -290,9 +288,9 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isInQueue(show_obj, (ShowQueueActions.REFRESH,)) + return self._is_in_queue(show_obj, (ShowQueueActions.REFRESH,)) - def isInRenameQueue(self, show_obj): + def is_in_rename_queue(self, show_obj): # type: (TVShow) -> bool """ @@ -301,9 +299,9 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isInQueue(show_obj, (ShowQueueActions.RENAME,)) + return self._is_in_queue(show_obj, (ShowQueueActions.RENAME,)) - def isInSubtitleQueue(self, show_obj): + def is_in_subtitle_queue(self, show_obj): # type: (TVShow) -> bool """ @@ -312,9 +310,9 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isInQueue(show_obj, (ShowQueueActions.SUBTITLE,)) + return self._is_in_queue(show_obj, (ShowQueueActions.SUBTITLE,)) - def isBeingAdded(self, show_obj): + def is_being_added(self, show_obj): # type: (TVShow) -> bool """ @@ -323,9 +321,9 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isBeingSomethinged(show_obj, (ShowQueueActions.ADD,)) + return self._is_being_somethinged(show_obj, (ShowQueueActions.ADD,)) - def isBeingUpdated(self, show_obj): + def is_being_updated(self, show_obj): # type: (TVShow) -> bool """ @@ -334,10 +332,10 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isBeingSomethinged(show_obj, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE, - ShowQueueActions.WEBFORCEUPDATE)) + return self._is_being_somethinged(show_obj, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE, + ShowQueueActions.WEBFORCEUPDATE)) - def isBeingRefreshed(self, show_obj): + def is_being_refreshed(self, show_obj): # type: (TVShow) -> bool """ @@ -346,9 +344,9 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isBeingSomethinged(show_obj, (ShowQueueActions.REFRESH,)) + return self._is_being_somethinged(show_obj, (ShowQueueActions.REFRESH,)) - def isBeingRenamed(self, show_obj): + def is_being_renamed(self, show_obj): # type: (TVShow) -> bool """ @@ -357,9 +355,9 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isBeingSomethinged(show_obj, (ShowQueueActions.RENAME,)) + return self._is_being_somethinged(show_obj, (ShowQueueActions.RENAME,)) - def isBeingSubtitled(self, show_obj): + def is_being_subtitled(self, show_obj): # type: (TVShow) -> bool """ @@ -368,9 +366,9 @@ class ShowQueue(generic_queue.GenericQueue): :return: :rtype: bool """ - return self._isBeingSomethinged(show_obj, (ShowQueueActions.SUBTITLE,)) + return self._is_being_somethinged(show_obj, 
(ShowQueueActions.SUBTITLE,)) - def isShowUpdateRunning(self): + def is_show_update_running(self): """ :return: @@ -387,7 +385,7 @@ class ShowQueue(generic_queue.GenericQueue): :param show_obj: show object """ - return self._isBeingSomethinged(show_obj, (ShowQueueActions.SWITCH,)) + return self._is_being_somethinged(show_obj, (ShowQueueActions.SWITCH,)) def is_show_switch_queued(self, show_obj): # type: (TVShow) -> bool @@ -396,21 +394,21 @@ class ShowQueue(generic_queue.GenericQueue): :param show_obj: show object """ - return self._isInQueue(show_obj, (ShowQueueActions.SWITCH,)) + return self._is_in_queue(show_obj, (ShowQueueActions.SWITCH,)) def is_switch_running(self): # type: (...) -> bool with self.lock: return any(1 for x in self.queue + [self.currentItem] if isinstance(x, QueueItemSwitchSource)) - def _getLoadingShowList(self): + def _get_loading_showlist(self): """ :return: :rtype: List """ with self.lock: - return [x for x in self.queue + [self.currentItem] if None is not x and x.isLoading] + return [x for x in self.queue + [self.currentItem] if None is not x and x.is_loading] def queue_length(self): # type: (...) -> Dict[AnyStr, List[AnyStr, Dict]] @@ -454,18 +452,18 @@ class ShowQueue(generic_queue.GenericQueue): length['switch'].append(result_item) return length - loadingShowList = property(_getLoadingShowList) + loading_showlist = property(_get_loading_showlist) - def updateShow(self, - show_obj, # type: TVShow - force=False, # type: bool - web=False, # type: bool - scheduled_update=False, # type: bool - priority=generic_queue.QueuePriorities.NORMAL, # type: integer_types - uid=None, # type: integer_types - add_to_db=True, # type: bool - **kwargs # type: Any - ): # type: (...) -> Union[QueueItemUpdate, QueueItemForceUpdate, QueueItemForceUpdateWeb] + def update_show(self, + show_obj, # type: TVShow + force=False, # type: bool + web=False, # type: bool + scheduled_update=False, # type: bool + priority=generic_queue.QueuePriorities.NORMAL, # type: integer_types + uid=None, # type: integer_types + add_to_db=True, # type: bool + **kwargs # type: Any + ): # type: (...) 
-> Union[QueueItemUpdate, QueueItemForceUpdate, QueueItemForceUpdateWeb] """ :param show_obj: show object @@ -485,15 +483,15 @@ class ShowQueue(generic_queue.GenericQueue): :rtype: QueueItemUpdate or QueueItemForceUpdateWeb or QueueItemForceUpdate """ with self.lock: - if self.isBeingAdded(show_obj): + if self.is_being_added(show_obj): raise exceptions_helper.CantUpdateException( 'Show is still being added, wait until it is finished before you update.') - if self.isBeingUpdated(show_obj): + if self.is_being_updated(show_obj): raise exceptions_helper.CantUpdateException( 'This show is already being updated, can\'t update again until it\'s done.') - if self.isInUpdateQueue(show_obj): + if self.is_in_update_queue(show_obj): raise exceptions_helper.CantUpdateException( 'This show is already being updated, can\'t update again until it\'s done.') @@ -516,9 +514,9 @@ class ShowQueue(generic_queue.GenericQueue): return queue_item_obj - def refreshShow(self, show_obj, force=False, scheduled_update=False, after_update=False, - priority=generic_queue.QueuePriorities.HIGH, force_image_cache=False, uid=None, add_to_db=True, - **kwargs): + def refresh_show(self, show_obj, force=False, scheduled_update=False, after_update=False, + priority=generic_queue.QueuePriorities.HIGH, force_image_cache=False, uid=None, add_to_db=True, + **kwargs): # type: (TVShow, bool, bool, bool, integer_types, bool, integer_types, bool, Any) -> Optional[QueueItemRefresh] """ @@ -541,10 +539,11 @@ class ShowQueue(generic_queue.GenericQueue): :rtype: QueueItemRefresh """ with self.lock: - if (self.isBeingRefreshed(show_obj) or self.isInRefreshQueue(show_obj)) and not force: + if (self.is_being_refreshed(show_obj) or self.is_in_refresh_queue(show_obj)) and not force: raise exceptions_helper.CantRefreshException('This show is being refreshed, not refreshing again.') - if ((not after_update and self.isBeingUpdated(show_obj)) or self.isInUpdateQueue(show_obj)) and not force: + if ((not after_update and self.is_being_updated(show_obj)) + or self.is_in_update_queue(show_obj)) and not force: logger.log('Skipping this refresh as there is already an update queued or' ' in progress and a refresh is done at the end of an update anyway.', logger.DEBUG) return @@ -561,7 +560,7 @@ class ShowQueue(generic_queue.GenericQueue): return queue_item_obj - def renameShowEpisodes(self, show_obj, uid=None, add_to_db=True): + def rename_show_episodes(self, show_obj, uid=None, add_to_db=True): # type: (TVShow, integer_types, bool) -> QueueItemRename """ @@ -758,14 +757,14 @@ class ShowQueueItem(generic_queue.QueueItem): self.show_obj = show_obj # type: sickgear.tv.TVShow self.scheduled_update = scheduled_update # type: bool - def isInQueue(self): + def is_in_queue(self): """ :rtype: bool """ return self in sickgear.show_queue_scheduler.action.queue + [ sickgear.show_queue_scheduler.action.currentItem] - def _getName(self): + def _get_name(self): """ :rtype: AnyStr """ @@ -773,7 +772,7 @@ class ShowQueueItem(generic_queue.QueueItem): return self.show_obj.name return '' - def _isLoading(self): + def _is_loading(self): return False def __str__(self): @@ -782,9 +781,9 @@ class ShowQueueItem(generic_queue.QueueItem): def __repr__(self): return self.__str__() - show_name = property(_getName) + show_name = property(_get_name) - isLoading = property(_isLoading) + is_loading = property(_is_loading) class QueueItemAdd(ShowQueueItem): @@ -864,7 +863,7 @@ class QueueItemAdd(ShowQueueItem): self.priority = generic_queue.QueuePriorities.VERYHIGH - def _getName(self): + 
def _get_name(self): """ :return: the show name if there is a show object created, if not returns the dir that the show is being added to. @@ -876,9 +875,9 @@ class QueueItemAdd(ShowQueueItem): return self.showDir return self.show_obj.name - show_name = property(_getName) + show_name = property(_get_name) - def _isLoading(self): + def _is_loading(self): """ :return: True if we've gotten far enough to have a show object, or False if we still only know the folder name. @@ -886,7 +885,7 @@ class QueueItemAdd(ShowQueueItem): """ return None is self.show_obj - isLoading = property(_isLoading) + is_loading = property(_is_loading) # if they gave a number to start or number to end as wanted, then change those eps to it def _get_wanted(self, db_obj, wanted_max, latest): @@ -985,7 +984,7 @@ class QueueItemAdd(ShowQueueItem): if getattr(t, 'show_not_found', False): logger.log('Show %s was not found on %s, maybe show was deleted' % (self.show_name, sickgear.TVInfoAPI(self.tvid).name), logger.ERROR) - self._finishEarly() + self._finish_early() return # this usually only happens if they have an NFO in their show dir @@ -997,7 +996,7 @@ class QueueItemAdd(ShowQueueItem): 'Show in %s has no name on %s, probably the wrong language.' ' Delete .nfo and add manually in the correct language.' % (self.showDir, sickgear.TVInfoAPI(self.tvid).name)) - self._finishEarly() + self._finish_early() return except (BaseException, Exception): logger.log('Unable to find show ID:%s on TV info: %s' % (self.prodid, sickgear.TVInfoAPI(self.tvid).name), @@ -1006,7 +1005,7 @@ class QueueItemAdd(ShowQueueItem): 'Unable to look up the show in %s on %s using ID %s, not using the NFO.' ' Delete .nfo and try adding manually again.' % (self.showDir, sickgear.TVInfoAPI(self.tvid).name, self.prodid)) - self._finishEarly() + self._finish_early() return try: @@ -1056,19 +1055,19 @@ class QueueItemAdd(ShowQueueItem): else: ui.notifications.error( 'Unable to add show due to an error with %s' % sickgear.TVInfoAPI(self.tvid).name) - self._finishEarly() + self._finish_early() return except exceptions_helper.MultipleShowObjectsException: logger.log('The show in %s is already in your show list, skipping' % self.showDir, logger.ERROR) ui.notifications.error('Show skipped', 'The show in %s is already in your show list' % self.showDir) - self._finishEarly() + self._finish_early() return except (BaseException, Exception) as e: logger.log('Error trying to add show: %s' % ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.ERROR) - self._finishEarly() + self._finish_early() raise self.show_obj.load_imdb_info() @@ -1078,7 +1077,7 @@ class QueueItemAdd(ShowQueueItem): except (BaseException, Exception) as e: logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.ERROR) - self._finishEarly() + self._finish_early() raise if not show_exists: @@ -1152,7 +1151,7 @@ class QueueItemAdd(ShowQueueItem): except (BaseException, Exception) as e: logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.ERROR) - self._finishEarly() + self._finish_early() raise # update internal name cache @@ -1191,14 +1190,14 @@ class QueueItemAdd(ShowQueueItem): self.finish() - def _finishEarly(self): + def _finish_early(self): if None is not self.show_obj: self.show_obj.delete_show() if self.new_show: - # if we adding a new show, delete the empty folder that was already created + # if adding a new show, delete the empty folder that was already 
created try: - ek.ek(os.rmdir, self.showDir) + os.rmdir(self.showDir) except (BaseException, Exception): pass @@ -1381,7 +1380,7 @@ class QueueItemUpdate(ShowQueueItem): if not sickgear.TVInfoAPI(self.show_obj.tvid).config['active']: logger.log('TV info source %s is marked inactive, aborting update for show %s and continue with refresh.' % (sickgear.TVInfoAPI(self.show_obj.tvid).config['name'], self.show_obj.name)) - sickgear.show_queue_scheduler.action.refreshShow(self.show_obj, self.force, self.scheduled_update, + sickgear.show_queue_scheduler.action.refresh_show(self.show_obj, self.force, self.scheduled_update, after_update=True) return @@ -1485,7 +1484,7 @@ class QueueItemUpdate(ShowQueueItem): sickgear.MEMCACHE['history_tab'] = sickgear.webserve.History.menu_tab( sickgear.MEMCACHE['history_tab_limit']) if not getattr(self, 'skip_refresh', False): - sickgear.show_queue_scheduler.action.refreshShow(self.show_obj, self.force, self.scheduled_update, + sickgear.show_queue_scheduler.action.refresh_show(self.show_obj, self.force, self.scheduled_update, after_update=True, force_image_cache=self.force_web, **self.kwargs) diff --git a/sickgear/show_updater.py b/sickgear/show_updater.py index 7bd46c31..901b431c 100644 --- a/sickgear/show_updater.py +++ b/sickgear/show_updater.py @@ -20,8 +20,6 @@ import traceback import exceptions_helper from exceptions_helper import ex -# noinspection PyPep8Naming -import encodingKludge as ek import sickgear from . import db, logger, network_timezones, properFinder, ui @@ -72,8 +70,7 @@ class ShowUpdater(object): if sickgear.db.db_supports_backup and 0 < sickgear.BACKUP_DB_MAX_COUNT: logger.log('backing up all db\'s') try: - sickgear.db.backup_all_dbs(sickgear.BACKUP_DB_PATH or - ek.ek(os.path.join, sickgear.DATA_DIR, 'backup')) + sickgear.db.backup_all_dbs(sickgear.BACKUP_DB_PATH or os.path.join(sickgear.DATA_DIR, 'backup')) except (BaseException, Exception): logger.log('backup db error', logger.ERROR) @@ -137,7 +134,7 @@ class ShowUpdater(object): # cleanup ignore and require lists try: clean_ignore_require_words() - except Exception: + except (BaseException, Exception): logger.log('ignore, require words cleanup error', logger.ERROR) logger.log(traceback.format_exc(), logger.ERROR) @@ -166,7 +163,7 @@ class ShowUpdater(object): logger.log(traceback.format_exc(), logger.ERROR) # select 10 'Ended' tv_shows updated more than 90 days ago - # and all shows not updated more then 180 days ago to include in this update + # and all shows not updated more than 180 days ago to include in this update stale_should_update = [] stale_update_date = (update_date - datetime.timedelta(days=90)).toordinal() stale_update_date_max = (update_date - datetime.timedelta(days=180)).toordinal() @@ -204,16 +201,16 @@ class ShowUpdater(object): try: # if should_update returns True (not 'Ended') or show is selected stale 'Ended' then update, # otherwise just refresh - if cur_show_obj.should_update(update_date=update_date, - last_indexer_change=show_updates.get(cur_show_obj.tvid, {}). 
- get(cur_show_obj.prodid)) \ + if cur_show_obj.should_update( + update_date=update_date, + last_indexer_change=show_updates.get(cur_show_obj.tvid, {}).get(cur_show_obj.prodid)) \ or cur_show_obj.tvid_prodid in stale_should_update: - cur_queue_item = sickgear.show_queue_scheduler.action.updateShow(cur_show_obj, - scheduled_update=True) + cur_queue_item = sickgear.show_queue_scheduler.action.update_show( + cur_show_obj, scheduled_update=True) else: logger.debug(u'Not updating episodes for show %s because it\'s marked as ended and last/next' u' episode is not within the grace period.' % cur_show_obj.unique_name) - cur_queue_item = sickgear.show_queue_scheduler.action.refreshShow(cur_show_obj, True, True) + cur_queue_item = sickgear.show_queue_scheduler.action.refresh_show(cur_show_obj, True, True) pi_list.append(cur_queue_item) diff --git a/sickgear/subtitles.py b/sickgear/subtitles.py index d3a7dbbd..2cffd798 100644 --- a/sickgear/subtitles.py +++ b/sickgear/subtitles.py @@ -17,9 +17,6 @@ import datetime -# noinspection PyPep8Naming -import encodingKludge as ek - from . import db, helpers, logger from .common import * @@ -31,41 +28,41 @@ SINGLE = 'und' def sorted_service_list(): - servicesMapping = dict([(x.lower(), x) for x in subliminal.core.SERVICES]) + services_mapping = dict([(x.lower(), x) for x in subliminal.core.SERVICES]) - newList = [] + new_list = [] # add all services in the priority list, in order - curIndex = 0 - for curService in sickgear.SUBTITLES_SERVICES_LIST: - if curService in servicesMapping: - curServiceDict = dict( - id=curService, - image=curService + '.png', - name=servicesMapping[curService], - enabled=1 == sickgear.SUBTITLES_SERVICES_ENABLED[curIndex], - api_based=__import__('lib.subliminal.services.' + curService, globals=globals(), + cur_index = 0 + for cur_service in sickgear.SUBTITLES_SERVICES_LIST: + if cur_service in services_mapping: + cur_service_dict = dict( + id=cur_service, + image=cur_service + '.png', + name=services_mapping[cur_service], + enabled=1 == sickgear.SUBTITLES_SERVICES_ENABLED[cur_index], + api_based=__import__('lib.subliminal.services.' + cur_service, globals=globals(), locals=locals(), fromlist=['Service']).Service.api_based, - url=__import__('lib.subliminal.services.' + curService, globals=globals(), + url=__import__('lib.subliminal.services.' + cur_service, globals=globals(), locals=locals(), fromlist=['Service']).Service.site_url) - newList.append(curServiceDict) - curIndex += 1 + new_list.append(cur_service_dict) + cur_index += 1 # add any services that are missing from that list - for curService in servicesMapping: - if curService not in [x['id'] for x in newList]: - curServiceDict = dict( - id=curService, - image=curService + '.png', - name=servicesMapping[curService], + for cur_service in services_mapping: + if cur_service not in [x['id'] for x in new_list]: + cur_service_dict = dict( + id=cur_service, + image=cur_service + '.png', + name=services_mapping[cur_service], enabled=False, - api_based=__import__('lib.subliminal.services.' + curService, globals=globals(), + api_based=__import__('lib.subliminal.services.' + cur_service, globals=globals(), locals=locals(), fromlist=['Service']).Service.api_based, - url=__import__('lib.subliminal.services.' + curService, globals=globals(), + url=__import__('lib.subliminal.services.' 
+ cur_service, globals=globals(), locals=locals(), fromlist=['Service']).Service.site_url) - newList.append(curServiceDict) + new_list.append(cur_service_dict) - return newList + return new_list def get_enabled_service_list(): @@ -81,10 +78,10 @@ def get_language_name(select_lang): def wanted_languages(sql_like=False): - wantedLanguages = sorted(sickgear.SUBTITLES_LANGUAGES) + wanted_langs = sorted(sickgear.SUBTITLES_LANGUAGES) if sql_like: - return '%' + ','.join(wantedLanguages) + '%' - return wantedLanguages + return '%' + ','.join(wanted_langs) + '%' + return wanted_langs def subtitles_languages(video_path): @@ -166,7 +163,7 @@ class SubtitlesFinder(object): now = datetime.datetime.now() for cur_result in sql_result: - if not ek.ek(os.path.isfile, cur_result['location']): + if not os.path.isfile(cur_result['location']): logger.log('Episode file does not exist, cannot download subtitles for episode %dx%d of show %s' % (cur_result['season'], cur_result['episode'], cur_result['show_name']), logger.DEBUG) continue diff --git a/sickgear/traktChecker.py b/sickgear/traktChecker.py index 092a0471..851ed124 100644 --- a/sickgear/traktChecker.py +++ b/sickgear/traktChecker.py @@ -19,9 +19,6 @@ import datetime import os import traceback -# noinspection PyPep8Naming -import encodingKludge as ek - import sickgear from . import helpers, logger, search_queue from .common import SKIPPED, WANTED @@ -175,7 +172,7 @@ class TraktChecker(object): location = None if location: - showPath = ek.ek(os.path.join, location, helpers.sanitize_filename(name)) + showPath = os.path.join(location, helpers.sanitize_filename(name)) dir_exists = helpers.make_dir(showPath) if not dir_exists: logger.log(u"Unable to create the folder " + showPath + ", can't add the show", logger.ERROR) diff --git a/sickgear/tv.py b/sickgear/tv.py index ab0155a3..8ec8cc69 100644 --- a/sickgear/tv.py +++ b/sickgear/tv.py @@ -36,8 +36,6 @@ import traceback from imdbpie import ImdbAPIError from lxml_etree import etree -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex @@ -72,7 +70,7 @@ from six import integer_types, iteritems, itervalues, moves, PY2, string_types if False: from typing import Any, AnyStr, Dict, List, Optional, Set, Text, Tuple, Union from sqlite3 import Row - from lib.tvinfo_base import CastList, Character as TVINFO_Character, Person as TVINFO_Person, \ + from lib.tvinfo_base import CastList, TVInfoCharacter, TVInfoPerson, \ TVInfoEpisode, TVInfoShow coreid_warnings = False @@ -453,7 +451,7 @@ class Person(Referential): 'homepage', 'ids', 'image_url', 'name', 'nicknames', 'real_name', 'thumb_url']} def reset(self, person_obj=None): - # type: (TVINFO_Person) -> None + # type: (TVInfoPerson) -> None """ reset all properties with the exception of: name, id, ids @@ -646,7 +644,7 @@ class Person(Referential): break def update_prop_from_tvinfo_person(self, person_obj): - # type: (TVINFO_Person) -> None + # type: (TVInfoPerson) -> None """ update person with tvinfo person object info Note: doesn't change: name, id, image_url, thumb_url @@ -747,7 +745,7 @@ class Person(Referential): continue if tvsrc_result: # verify we have the correct person - for cur_person in tvsrc_result: # type: TVINFO_Person + for cur_person in tvsrc_result: # type: TVInfoPerson if None is not rp: break if not (imdb_confirmed and TVINFO_IMDB == tv_src) \ @@ -767,7 +765,7 @@ class Person(Referential): # noinspection PyUnresolvedReferences if show_obj and None is not pd and pd.characters: 
clean_show_name = indexermapper.clean_show_name(show_obj.name.lower()) - for ch in pd.characters or []: # type: TVINFO_Character + for ch in pd.characters or []: # type: TVInfoCharacter if clean_show_name == indexermapper.clean_show_name(ch.show.seriesname.lower()): rp = pd confirmed_on_src = True @@ -1635,7 +1633,7 @@ class TVShow(TVShowBase): if sickgear.CREATE_MISSING_SHOW_DIRS: return self._location - if ek.ek(os.path.isdir, self._location): + if os.path.isdir(self._location): return self._location raise exceptions_helper.ShowDirNotFoundException('Show folder does not exist: \'%s\'' % self._location) @@ -1644,7 +1642,7 @@ class TVShow(TVShowBase): # type: (AnyStr) -> None logger.log('Setter sets location to %s' % new_location, logger.DEBUG) # Don't validate dir if user wants to add shows without creating a dir - if sickgear.ADD_SHOWS_WO_DIR or ek.ek(os.path.isdir, new_location): + if sickgear.ADD_SHOWS_WO_DIR or os.path.isdir(new_location): self.dirty_setter('_location')(self, new_location) self.path = new_location # self._is_location_good = True @@ -2052,7 +2050,7 @@ class TVShow(TVShowBase): result = False - if not ek.ek(os.path.isdir, self._location): + if not os.path.isdir(self._location): logger.log('%s: Show directory doesn\'t exist, skipping NFO generation' % self.tvid_prodid) return False @@ -2068,7 +2066,7 @@ class TVShow(TVShowBase): :param show_only: only for show :param force: """ - if not ek.ek(os.path.isdir, self._location): + if not os.path.isdir(self._location): logger.log('%s: Show directory doesn\'t exist, skipping NFO generation' % self.tvid_prodid) return @@ -2084,7 +2082,7 @@ class TVShow(TVShowBase): def write_episode_nfo(self, force=False): # type: (bool) -> None - if not ek.ek(os.path.isdir, self._location): + if not os.path.isdir(self._location): logger.log('%s: Show directory doesn\'t exist, skipping NFO generation' % self.tvid_prodid) return @@ -2119,7 +2117,7 @@ class TVShow(TVShowBase): def update_metadata(self): - if not ek.ek(os.path.isdir, self._location): + if not os.path.isdir(self._location): logger.log('%s: Show directory doesn\'t exist, skipping NFO generation' % self.tvid_prodid) return @@ -2129,7 +2127,7 @@ class TVShow(TVShowBase): result = False - if not ek.ek(os.path.isdir, self._location): + if not os.path.isdir(self._location): logger.log('%s: Show directory doesn\'t exist, skipping NFO generation' % self.tvid_prodid) return False @@ -2142,7 +2140,7 @@ class TVShow(TVShowBase): # find all media files in the show folder and create episodes for as many as possible def load_episodes_from_dir(self): - if not ek.ek(os.path.isdir, self._location): + if not os.path.isdir(self._location): logger.log('%s: Show directory doesn\'t exist, not loading episodes from disk' % self.tvid_prodid) return @@ -2159,7 +2157,7 @@ class TVShow(TVShowBase): logger.log('%s: Creating episode from %s' % (self.tvid_prodid, cur_media_file), logger.DEBUG) try: - ep_obj = self.ep_obj_from_file(ek.ek(os.path.join, self._location, cur_media_file)) + ep_obj = self.ep_obj_from_file(os.path.join(self._location, cur_media_file)) except (exceptions_helper.ShowNotFoundException, exceptions_helper.EpisodeNotFoundException) as e: logger.log('Episode %s returned an exception: %s' % (cur_media_file, ex(e)), logger.ERROR) continue @@ -2170,8 +2168,8 @@ class TVShow(TVShowBase): continue # see if we should save the release name in the db - ep_file_name = ek.ek(os.path.basename, ep_obj.location) - ep_file_name = ek.ek(os.path.splitext, ep_file_name)[0] + ep_file_name = 
os.path.basename(ep_obj.location) + ep_file_name = os.path.splitext(ep_file_name)[0] try: parse_result = None @@ -2423,7 +2421,7 @@ class TVShow(TVShowBase): :param path: :return: """ - if not ek.ek(os.path.isfile, path): + if not os.path.isfile(path): logger.log('%s: Not a real file... %s' % (self.tvid_prodid, path)) return None @@ -2474,7 +2472,7 @@ class TVShow(TVShowBase): if IGNORED == status: continue - if (ep_obj.location and ek.ek(os.path.normpath, ep_obj.location) != ek.ek(os.path.normpath, path)) or \ + if (ep_obj.location and os.path.normpath(ep_obj.location) != os.path.normpath(path)) or \ (not ep_obj.location and path) or \ (SKIPPED == status): logger.log('The old episode had a different file associated with it, re-checking the quality ' + @@ -2856,14 +2854,14 @@ class TVShow(TVShowBase): if show_info.cast and self._should_cast_update(show_info.cast): sickgear.people_queue_scheduler.action.add_cast_update(show_obj=self, show_info_cast=show_info.cast, - scheduled_update=scheduled_update, switch=switch) + scheduled_update=scheduled_update, switch=switch) else: logger.log('Not updating cast for show because data is unchanged.') return show_info @staticmethod def _update_person_properties_helper(person_obj, src_person, p_ids): - # type: (Person, TVINFO_Person, Dict) -> None + # type: (Person, TVInfoPerson, Dict) -> None person_obj.update_properties( name=src_person.name, gender=src_person.gender, birthday=src_person.birthdate, deathday=src_person.deathdate, @@ -2898,7 +2896,7 @@ class TVShow(TVShowBase): cast_list = self._load_cast_from_db() remove_char_ids = {c.id for c in cast_list or []} cast_ordered = weakList() - for ct, c_l in iteritems(show_info_cast): # type: (integer_types, List[TVINFO_Character]) + for ct, c_l in iteritems(show_info_cast): # type: (integer_types, List[TVInfoCharacter]) if ct not in (RoleTypes.ActorMain, RoleTypes.Host, RoleTypes.Interviewer, RoleTypes.Presenter): continue for c in c_l: @@ -3074,7 +3072,7 @@ class TVShow(TVShowBase): self._imdbid = redirect_check imdb_id = redirect_check imdb_info['imdb_id'] = self.imdbid - i = imdbpie.Imdb(exclude_episodes=True, cachedir=ek.ek(os.path.join, sickgear.CACHE_DIR, 'imdb-pie')) + i = imdbpie.Imdb(exclude_episodes=True, cachedir=os.path.join(sickgear.CACHE_DIR, 'imdb-pie')) if not helpers.parse_imdb_id(imdb_id): logger.log('Not a valid imdbid: %s for show: %s' % (imdb_id, self._name), logger.WARNING) return @@ -3276,10 +3274,10 @@ class TVShow(TVShowBase): # clear the cache ic = image_cache.ImageCache() - for cache_obj in ek.ek(glob.glob, ic.fanart_path(self.tvid, self.prodid).replace('fanart.jpg', '*')) \ - + ek.ek(glob.glob, ic.poster_thumb_path(self.tvid, self.prodid).replace('poster.jpg', '*')) \ - + ek.ek(glob.glob, ic.poster_path(self.tvid, self.prodid).replace('poster.jpg', '*')): - cache_dir = ek.ek(os.path.isdir, cache_obj) + for cache_obj in glob.glob(ic.fanart_path(self.tvid, self.prodid).replace('fanart.jpg', '*')) \ + + glob.glob(ic.poster_thumb_path(self.tvid, self.prodid).replace('poster.jpg', '*')) \ + + glob.glob(ic.poster_path(self.tvid, self.prodid).replace('poster.jpg', '*')): + cache_dir = os.path.isdir(cache_obj) result = helpers.remove_file(cache_obj, tree=cache_dir, log_level=logger.WARNING) if result: logger.log('%s cache %s %s' % (result, cache_dir and 'dir' or 'file', cache_obj)) @@ -3292,12 +3290,12 @@ class TVShow(TVShowBase): try: logger.log('Attempt to %s show folder %s' % (action, self._location)) # check first the read-only attribute - file_attribute = ek.ek(os.stat, 
self.location)[0] + file_attribute = os.stat(self.location)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable logger.log('Attempting to make writeable the read only folder %s' % self._location, logger.DEBUG) try: - ek.ek(os.chmod, self.location, stat.S_IWRITE) + os.chmod(self.location, stat.S_IWRITE) except (BaseException, Exception): logger.log('Unable to change permissions of %s' % self._location, logger.WARNING) @@ -3324,7 +3322,7 @@ class TVShow(TVShowBase): def refresh_dir(self): # make sure the show dir is where we think it is unless dirs are created on the fly - if not ek.ek(os.path.isdir, self._location) and not sickgear.CREATE_MISSING_SHOW_DIRS: + if not os.path.isdir(self._location) and not sickgear.CREATE_MISSING_SHOW_DIRS: return False # load from dir @@ -3351,7 +3349,7 @@ class TVShow(TVShowBase): for cur_row in sql_result: season = int(cur_row['season']) episode = int(cur_row['episode']) - location = ek.ek(os.path.normpath, cur_row['location']) + location = os.path.normpath(cur_row['location']) try: ep_obj = self.get_episode(season, episode, ep_result=[cur_row]) @@ -3362,7 +3360,7 @@ class TVShow(TVShowBase): # if the path exist and if it's in our show dir if (self.prune and season and ep_obj.location not in attempted and 0 < helpers.get_size(ep_obj.location) and - ek.ek(os.path.normpath, location).startswith(ek.ek(os.path.normpath, self.location))): + os.path.normpath(location).startswith(os.path.normpath(self.location))): with ep_obj.lock: if ep_obj.status in Quality.DOWNLOADED: # locations repeat but attempt to delete once @@ -3377,8 +3375,8 @@ class TVShow(TVShowBase): kept += 1 # if the path doesn't exist or if it's not in our show dir - if not ek.ek(os.path.isfile, location) or not ek.ek(os.path.normpath, location).startswith( - ek.ek(os.path.normpath, self.location)): + if not os.path.isfile(location) or not os.path.normpath(location).startswith( + os.path.normpath(self.location)): # check if downloaded files still exist, update our data if this has changed if 1 != sickgear.SKIP_REMOVED_FILES: @@ -3427,7 +3425,7 @@ class TVShow(TVShowBase): :param force: """ # TODO: Add support for force option - if not ek.ek(os.path.isdir, self._location): + if not os.path.isdir(self._location): logger.log('%s: Show directory doesn\'t exist, can\'t download subtitles' % self.tvid_prodid, logger.DEBUG) return logger.log('%s: Downloading subtitles' % self.tvid_prodid, logger.DEBUG) @@ -3526,11 +3524,11 @@ class TVShow(TVShowBase): save_mapping(self) name_cache.remove_from_namecache(old_tvid, old_prodid) - image_cache_dir = ek.ek(os.path.join, sickgear.CACHE_DIR, 'images', 'shows') - old_dir = ek.ek(os.path.join, image_cache_dir, '%s-%s' % (old_tvid, old_prodid)) - new_dir = ek.ek(os.path.join, image_cache_dir, '%s-%s' % (self.tvid, self.prodid)) + image_cache_dir = os.path.join(sickgear.CACHE_DIR, 'images', 'shows') + old_dir = os.path.join(image_cache_dir, '%s-%s' % (old_tvid, old_prodid)) + new_dir = os.path.join(image_cache_dir, '%s-%s' % (self.tvid, self.prodid)) try: - ek.ek(os.rename, old_dir, new_dir) + os.rename(old_dir, new_dir) except (BaseException, Exception) as e: logger.log('Unable to rename %s to %s: %s / %s' % (old_dir, new_dir, repr(e), ex(e)), logger.WARNING) @@ -3556,7 +3554,7 @@ class TVShow(TVShowBase): if update_show: # force the update try: - sickgear.show_queue_scheduler.action.updateShow( + sickgear.show_queue_scheduler.action.update_show( self, force=True, web=True, priority=QueuePriorities.VERYHIGH, 
pausestatus_after=pausestatus_after, switch_src=True) except exceptions_helper.CantUpdateException as e: @@ -3944,8 +3942,8 @@ class TVEpisode(TVEpisodeBase): # self._location = newLocation self.dirty_setter('_location')(self, val) - if val and ek.ek(os.path.isfile, val): - self.file_size = ek.ek(os.path.getsize, val) + if val and os.path.isfile(val): + self.file_size = os.path.getsize(val) else: self.file_size = 0 @@ -3968,7 +3966,7 @@ class TVEpisode(TVEpisodeBase): return # TODO: Add support for force option - if not ek.ek(os.path.isfile, self.location): + if not os.path.isfile(self.location): logger.log('%s: Episode file doesn\'t exist, can\'t download subtitles for episode %sx%s' % (self.show_obj.tvid_prodid, self.season, self.episode), logger.DEBUG) return @@ -3987,7 +3985,7 @@ class TVEpisode(TVEpisodeBase): if sickgear.SUBTITLES_DIR: for video in subs: - subs_new_path = ek.ek(os.path.join, ek.ek(os.path.dirname, video.path), sickgear.SUBTITLES_DIR) + subs_new_path = os.path.join(os.path.dirname(video.path), sickgear.SUBTITLES_DIR) dir_exists = helpers.make_dir(subs_new_path) if not dir_exists: logger.log('Unable to create subtitles folder %s' % subs_new_path, logger.ERROR) @@ -3995,7 +3993,7 @@ class TVEpisode(TVEpisodeBase): helpers.chmod_as_parent(subs_new_path) for subtitle in subs.get(video): - new_file_path = ek.ek(os.path.join, subs_new_path, ek.ek(os.path.basename, subtitle.path)) + new_file_path = os.path.join(subs_new_path, os.path.basename(subtitle.path)) helpers.move_file(subtitle.path, new_file_path) helpers.chmod_as_parent(new_file_path) else: @@ -4052,7 +4050,7 @@ class TVEpisode(TVEpisodeBase): hastbn = False # check for nfo and tbn - if ek.ek(os.path.isfile, self.location): + if os.path.isfile(self.location): for cur_provider in itervalues(sickgear.metadata_provider_dict): if cur_provider.episode_metadata: new_result = cur_provider.has_episode_metadata(self) @@ -4085,7 +4083,7 @@ class TVEpisode(TVEpisodeBase): """ if not self.load_from_db(season, episode, **kwargs): # only load from NFO if we didn't load from DB - if ek.ek(os.path.isfile, self.location): + if os.path.isfile(self.location): try: self.load_from_nfo(self.location) except exceptions_helper.NoNFOException: @@ -4168,7 +4166,7 @@ class TVEpisode(TVEpisodeBase): self._subtitles_searchcount = show_result['subtitles_searchcount'] self._timestamp = show_result['timestamp'] or self._make_timestamp() self._version = self._version if not show_result['version'] else int(show_result['version']) - self.location = show_result['location'] and ek.ek(os.path.normpath, show_result['location']) or self.location + self.location = show_result['location'] and os.path.normpath(show_result['location']) or self.location if None is not show_result['release_group']: self._release_group = show_result['release_group'] @@ -4414,7 +4412,7 @@ class TVEpisode(TVEpisodeBase): # don't update show status if show dir is missing, unless it's missing on purpose # noinspection PyProtectedMember - if not ek.ek(os.path.isdir, self._show_obj._location) \ + if not os.path.isdir(self._show_obj._location) \ and not sickgear.CREATE_MISSING_SHOW_DIRS and not sickgear.ADD_SHOWS_WO_DIR: if UNKNOWN == self._status: self.status = (SKIPPED, UNAIRED)[future_airtime] @@ -4430,7 +4428,7 @@ class TVEpisode(TVEpisodeBase): logger.DEBUG) # if we don't have the file - if not ek.ek(os.path.isfile, self._location): + if not os.path.isfile(self._location): if self._status in [SKIPPED, UNAIRED, UNKNOWN, WANTED]: very_old_delta = datetime.timedelta(days=90) @@ 
-4498,7 +4496,7 @@ class TVEpisode(TVEpisodeBase): :type location: AnyStr """ # noinspection PyProtectedMember - if not ek.ek(os.path.isdir, self._show_obj._location): + if not os.path.isdir(self._show_obj._location): logger.log('%s: The show directory is missing, not bothering to try loading the episode NFO' % self._show_obj.tvid_prodid) return @@ -4518,14 +4516,14 @@ class TVEpisode(TVEpisodeBase): nfo_file = sickgear.helpers.replace_extension(self.location, 'nfo') logger.log('%s: Using NFO name %s' % (self._show_obj.tvid_prodid, nfo_file), logger.DEBUG) - if ek.ek(os.path.isfile, nfo_file): + if os.path.isfile(nfo_file): try: show_xml = etree.ElementTree(file=nfo_file) except (SyntaxError, ValueError) as e: logger.log('Error loading the NFO, backing up the NFO and skipping for now: %s' % ex(e), logger.ERROR) # TODO: figure out what's wrong and fix it try: - ek.ek(os.rename, nfo_file, '%s.old' % nfo_file) + os.rename(nfo_file, '%s.old' % nfo_file) except (BaseException, Exception) as e: logger.log( 'Failed to rename your episode\'s NFO file - you need to delete it or fix it: %s' % ex(e), @@ -4576,7 +4574,7 @@ class TVEpisode(TVEpisodeBase): else: self.hasnfo = False - if ek.ek(os.path.isfile, sickgear.helpers.replace_extension(nfo_file, 'tbn')): + if os.path.isfile(sickgear.helpers.replace_extension(nfo_file, 'tbn')): self.hastbn = True else: self.hastbn = False @@ -4613,7 +4611,7 @@ class TVEpisode(TVEpisodeBase): def create_meta_files(self, force=False): # noinspection PyProtectedMember - if not ek.ek(os.path.isdir, self.show_obj._location): + if not os.path.isdir(self.show_obj._location): logger.log('%s: The show directory is missing, not bothering to try to create metadata' % self.show_obj.tvid_prodid) return @@ -4797,7 +4795,7 @@ class TVEpisode(TVEpisodeBase): # def full_location(self): # if self.location in (None, ''): # return None - # return ek.ek(os.path.join, self.show_obj.location, self.location) + # return os.path.join(self.show_obj.location, self.location) # # # TODO: remove if unused # def create_strings(self, pattern=None): @@ -5140,7 +5138,7 @@ class TVEpisode(TVEpisodeBase): return result # if not we append the folder on and use that - return ek.ek(os.path.join, self.formatted_dir(), result) + return os.path.join(self.formatted_dir(), result) def formatted_dir(self, pattern=None, multi=None): """ @@ -5164,7 +5162,7 @@ class TVEpisode(TVEpisodeBase): if 1 == len(name_groups): logger.debug('No Season Folder set in Naming pattern: %s' % pattern) return '' - return self._format_pattern(ek.ek(os.sep.join, name_groups[:-1]), multi) + return self._format_pattern(os.sep.join(name_groups[:-1]), multi) def formatted_filename(self, pattern=None, multi=None, anime_type=None): """ @@ -5193,13 +5191,13 @@ class TVEpisode(TVEpisodeBase): in the naming settings. 
""" - if not ek.ek(os.path.isfile, self.location): + if not os.path.isfile(self.location): logger.log('Can\'t perform rename on %s when it doesn\'t exist, skipping' % self.location, logger.WARNING) return proper_path = self.proper_path() - absolute_proper_path = ek.ek(os.path.join, self._show_obj.location, proper_path) - absolute_current_path_no_ext, file_ext = ek.ek(os.path.splitext, self.location) + absolute_proper_path = os.path.join(self._show_obj.location, proper_path) + absolute_current_path_no_ext, file_ext = os.path.splitext(self.location) absolute_current_path_no_ext_length = len(absolute_current_path_no_ext) related_subs = [] @@ -5224,7 +5222,7 @@ class TVEpisode(TVEpisodeBase): if self.show_obj.subtitles and '' != sickgear.SUBTITLES_DIR: related_subs = postProcessor.PostProcessor(self.location).list_associated_files(sickgear.SUBTITLES_DIR, subtitles_only=True) - # absolute_proper_subs_path = ek.ek(os.path.join, sickgear.SUBTITLES_DIR, self.formatted_filename()) + # absolute_proper_subs_path = os.path.join(sickgear.SUBTITLES_DIR, self.formatted_filename()) logger.log('Files associated to %s: %s' % (self.location, related_files), logger.DEBUG) @@ -5239,7 +5237,7 @@ class TVEpisode(TVEpisodeBase): logger.log('%s: Unable to rename file %s' % (self._epid, cur_related_file), logger.ERROR) for cur_related_sub in related_subs: - absolute_proper_subs_path = ek.ek(os.path.join, sickgear.SUBTITLES_DIR, self.formatted_filename()) + absolute_proper_subs_path = os.path.join(sickgear.SUBTITLES_DIR, self.formatted_filename()) renamed = helpers.rename_ep_file(cur_related_sub, absolute_proper_subs_path, absolute_current_path_no_ext_length) if not renamed: @@ -5277,7 +5275,7 @@ class TVEpisode(TVEpisodeBase): has_timestamp = isinstance(self._timestamp, int) and 1 < self._timestamp if not has_timestamp and (not isinstance(self._airdate, datetime.date) or 1 == self._airdate.year): logger.log('%s: Did not change modify date of %s because episode date is never aired or invalid' - % (self._show_obj.tvid_prodid, ek.ek(os.path.basename, self.location)), logger.DEBUG) + % (self._show_obj.tvid_prodid, os.path.basename(self.location)), logger.DEBUG) return aired_dt = None @@ -5292,7 +5290,7 @@ class TVEpisode(TVEpisodeBase): try: aired_epoch = SGDatetime.to_file_timestamp(aired_dt) - filemtime = int(ek.ek(os.path.getmtime, self.location)) + filemtime = int(os.path.getmtime(self.location)) except (BaseException, Exception): return @@ -5303,7 +5301,7 @@ class TVEpisode(TVEpisodeBase): result, loglevel = 'Error changing', logger.WARNING logger.log('%s: %s modify date of %s to show air date %s' - % (self._show_obj.tvid_prodid, result, ek.ek(os.path.basename, self.location), + % (self._show_obj.tvid_prodid, result, os.path.basename(self.location), 'n/a' if not aired_dt else aired_dt.strftime('%b %d,%Y (%H:%M)')), loglevel) def __getstate__(self): diff --git a/sickgear/ui.py b/sickgear/ui.py index a15743dc..b03d9728 100644 --- a/sickgear/ui.py +++ b/sickgear/ui.py @@ -158,10 +158,10 @@ class QueueProgressIndicator(object): return len(self.queueItemList) def numFinished(self): - return len([x for x in self.queueItemList if not x.isInQueue()]) + return len([x for x in self.queueItemList if not x.is_in_queue()]) def numRemaining(self): - return len([x for x in self.queueItemList if x.isInQueue()]) + return len([x for x in self.queueItemList if x.is_in_queue()]) def nextName(self): for curItem in [ diff --git a/sickgear/version_checker.py b/sickgear/version_checker.py index a2c892a2..23609e71 100644 --- 
a/sickgear/version_checker.py +++ b/sickgear/version_checker.py @@ -25,8 +25,6 @@ import time import traceback from . import gh_api as github -# noinspection PyPep8Naming -import encodingKludge as ek from exceptions_helper import ex import sickgear @@ -176,7 +174,7 @@ class SoftwareUpdater(object): 'git': running from source using git 'source': running from source without git """ - return ('source', 'git')[os.path.isdir(ek.ek(os.path.join, sickgear.PROG_DIR, '.git'))] + return ('source', 'git')[os.path.isdir(os.path.join(sickgear.PROG_DIR, '.git'))] def check_for_new_version(self, force=False): """ @@ -754,7 +752,7 @@ class SourceUpdateManager(UpdateManager): try: # prepare the update dir - sg_update_dir = ek.ek(os.path.join, sickgear.PROG_DIR, u'sg-update') + sg_update_dir = os.path.join(sickgear.PROG_DIR, u'sg-update') if os.path.isdir(sg_update_dir): logger.log(u'Clearing out update folder %s before extracting' % sg_update_dir) @@ -768,11 +766,11 @@ class SourceUpdateManager(UpdateManager): tar_download_path = os.path.join(sg_update_dir, u'sg-update.tar') urllib.request.urlretrieve(tar_download_url, tar_download_path) - if not ek.ek(os.path.isfile, tar_download_path): + if not os.path.isfile(tar_download_path): logger.error(u'Unable to retrieve new version from %s, can\'t update' % tar_download_url) return False - if not ek.ek(tarfile.is_tarfile, tar_download_path): + if not tarfile.is_tarfile(tar_download_path): logger.error(u'Retrieved version from %s is corrupt, can\'t update' % tar_download_url) return False diff --git a/sickgear/webapi.py b/sickgear/webapi.py index b13bea0d..691f2c6c 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -31,8 +31,6 @@ import time import traceback from . import webserve -# noinspection PyPep8Naming -import encodingKludge as ek import exceptions_helper from exceptions_helper import ex from json_helper import is_orjson, json_dumps, JSON_INDENT, json_loads, JSONEncoder, ORJSON_OPTIONS @@ -833,7 +831,7 @@ def _getRootDirs(): for root_dir in root_dirs: valid = 1 try: - ek.ek(os.listdir, root_dir) + os.listdir(root_dir) except (BaseException, Exception): valid = 0 default = 0 @@ -2003,7 +2001,7 @@ class CMD_SickGearAddRootDir(ApiCall): index = 0 # disallow adding/setting an invalid dir - if not ek.ek(os.path.isdir, self.location): + if not os.path.isdir(self.location): return _responds(RESULT_FAILURE, msg="Location is invalid") root_dirs = [] @@ -2340,8 +2338,8 @@ class CMD_SickGearGetIndexerIcon(ApiCall): self.handler.set_status(404) return _responds(RESULT_FAILURE, 'Icon not found') img = i['icon'] - image = ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', 'slick', 'images', img) - if not ek.ek(os.path.isfile, image): + image = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', img) + if not os.path.isfile(image): self.handler.set_status(404) return _responds(RESULT_FAILURE, 'Icon not found') return {'outputType': 'image', 'image': self.handler.get_image(image)} @@ -2361,9 +2359,8 @@ class CMD_SickGearGetNetworkIcon(ApiCall): ApiCall.__init__(self, handler, args, kwargs) def run(self): - image = ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', 'slick', 'images', 'network', - '%s.png' % self.network.lower()) - if not ek.ek(os.path.isfile, image): + image = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', 'network', '%s.png' % self.network.lower()) + if not os.path.isfile(image): self.handler.set_status(404) return _responds(RESULT_FAILURE, 'Icon not found') return {'outputType': 'image', 'image': self.handler.get_image(image)} @@ -3328,7 
+3325,7 @@ class CMD_SickGearShowAddExisting(ApiCall): if show_obj: return _responds(RESULT_FAILURE, msg="An existing indexerid already exists in the database") - if not ek.ek(os.path.isdir, self.location): + if not os.path.isdir(self.location): return _responds(RESULT_FAILURE, msg='Not a valid location') lINDEXER_API_PARMS = sickgear.TVInfoAPI(self.tvid).api_params.copy() @@ -3460,7 +3457,7 @@ class CMD_SickGearShowAddNew(ApiCall): else: return _responds(RESULT_FAILURE, msg="Root directory is not set, please provide a location") - if not ek.ek(os.path.isdir, self.location): + if not os.path.isdir(self.location): return _responds(RESULT_FAILURE, msg="'" + self.location + "' is not a valid location") # use default quality as a failsafe @@ -3611,9 +3608,9 @@ class CMD_SickGearShowCache(ApiCall): has_poster = 0 has_banner = 0 - if ek.ek(os.path.isfile, cache_obj.poster_path(show_obj.tvid, show_obj.prodid)): + if os.path.isfile(cache_obj.poster_path(show_obj.tvid, show_obj.prodid)): has_poster = 1 - if ek.ek(os.path.isfile, cache_obj.banner_path(show_obj.tvid, show_obj.prodid)): + if os.path.isfile(cache_obj.banner_path(show_obj.tvid, show_obj.prodid)): has_banner = 1 return _responds(RESULT_SUCCESS, {"poster": has_poster, "banner": has_banner}) @@ -3663,8 +3660,8 @@ class CMD_SickGearShowDelete(ApiCall): if not show_obj: return _responds(RESULT_FAILURE, msg="Show not found") - if sickgear.show_queue_scheduler.action.isBeingAdded( - show_obj) or sickgear.show_queue_scheduler.action.isBeingUpdated(show_obj): + if sickgear.show_queue_scheduler.action.is_being_added( + show_obj) or sickgear.show_queue_scheduler.action.is_being_updated(show_obj): return _responds(RESULT_FAILURE, msg="Show can not be deleted while being added or updated") show_obj.delete_show(full=self.full_delete) @@ -3834,8 +3831,7 @@ class CMD_SickGearShowListFanart(ApiCall): fanart = [] rating_names = {10: 'group', 20: 'favorite', 30: 'avoid'} cache_obj = image_cache.ImageCache() - for img in ek.ek(glob.glob, cache_obj.fanart_path( - show_obj.tvid, show_obj.prodid).replace('fanart.jpg', '*')) or []: + for img in glob.glob(cache_obj.fanart_path(show_obj.tvid, show_obj.prodid).replace('fanart.jpg', '*')) or []: match = re.search(r'(\d+(?:\.(\w*?(\d*)))?\.(?:\w{5,8}))\.fanart\.', img, re.I) if match and match.group(1): fanart += [(match.group(1), rating_names.get(sickgear.FANART_RATINGS.get( @@ -3870,7 +3866,7 @@ class CMD_SickGearShowRateFanart(ApiCall): cache_obj = image_cache.ImageCache() fanartfile = cache_obj.fanart_path(self.tvid, self.prodid).replace('fanart.jpg', '%s.fanart.jpg' % self.fanartname) - if not ek.ek(os.path.isfile, fanartfile): + if not os.path.isfile(fanartfile): return _responds(RESULT_FAILURE, msg='Unknown Fanart') fan_ratings = {'unrate': 0, 'group': 10, 'favorite': 20, 'avoid': 30} show_id = TVidProdid({self.tvid: self.prodid})() @@ -3906,19 +3902,19 @@ class CMD_SickGearShowGetFanart(ApiCall): def run(self): """ get the fanart stored for a show """ cache_obj = image_cache.ImageCache() - default_fanartfile = ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', 'slick', 'images', 'trans.png') + default_fanartfile = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', 'trans.png') fanartfile = default_fanartfile used_fanart = 'default' if self.fanartname: fanartfile = cache_obj.fanart_path(self.tvid, self.prodid).replace('fanart.jpg', '%s.fanart.jpg' % self.fanartname) - if not ek.ek(os.path.isfile, fanartfile): + if not os.path.isfile(fanartfile): fanartfile = default_fanartfile used_fanart = self.fanartname 
else: fanart = [] - for img in ek.ek(glob.glob, cache_obj.fanart_path(self.tvid, self.prodid).replace('fanart.jpg', '*')) or []: - if not ek.ek(os.path.isfile, img): + for img in glob.glob(cache_obj.fanart_path(self.tvid, self.prodid).replace('fanart.jpg', '*')) or []: + if not os.path.isfile(img): continue match = re.search(r'(\d+(?:\.(\w*?(\d*)))?\.(?:\w{5,8}))\.fanart\.', img, re.I) if match and match.group(1): @@ -3933,8 +3929,8 @@ class CMD_SickGearShowGetFanart(ApiCall): fanartfile = fanartsorted[random_fanart][0] used_fanart = fanartsorted[random_fanart][1] - if fanartfile and ek.ek(os.path.isfile, fanartfile): - with ek.ek(open, fanartfile, 'rb') as f: + if fanartfile and os.path.isfile(fanartfile): + with open(fanartfile, 'rb') as f: mime_type, encoding = MimeTypes().guess_type(fanartfile) self.handler.set_header('X-Fanartname', used_fanart) self.handler.set_header('Content-Type', mime_type) @@ -4021,7 +4017,7 @@ class CMD_SickGearShowRefresh(ApiCall): return _responds(RESULT_FAILURE, msg="Show not found") try: - sickgear.show_queue_scheduler.action.refreshShow(show_obj) + sickgear.show_queue_scheduler.action.refresh_show(show_obj) return _responds(RESULT_SUCCESS, msg='%s has queued to be refreshed' % show_obj.unique_name) except exceptions_helper.CantRefreshException as e: # TODO: log the exception @@ -4443,7 +4439,7 @@ class CMD_SickGearShowUpdate(ApiCall): return _responds(RESULT_FAILURE, msg="Show not found") try: - sickgear.show_queue_scheduler.action.updateShow(show_obj, True) + sickgear.show_queue_scheduler.action.update_show(show_obj, True) return _responds(RESULT_SUCCESS, msg='%s has queued to be updated' % show_obj.unique_name) except exceptions_helper.CantUpdateException as e: self.log(u'Unable to update %s. %s' % (show_obj.unique_name, ex(e)), logger.ERROR) @@ -4655,7 +4651,7 @@ class CMD_SickGearShowsForceUpdate(ApiCall): def run(self): """ force the daily show update now """ - if sickgear.show_queue_scheduler.action.isShowUpdateRunning() \ + if sickgear.show_queue_scheduler.action.is_show_update_running() \ or sickgear.show_update_scheduler.action.amActive: return _responds(RESULT_FAILURE, msg="show update already running.") diff --git a/sickgear/webserve.py b/sickgear/webserve.py index 13ab4a6d..cfbafc7a 100644 --- a/sickgear/webserve.py +++ b/sickgear/webserve.py @@ -37,8 +37,6 @@ import zipfile from exceptions_helper import ex, MultipleShowObjectsException import exceptions_helper -# noinspection PyPep8Naming -import encodingKludge as ek from json_helper import json_dumps, json_loads import sg_helpers from sg_helpers import remove_file, scantree @@ -173,7 +171,7 @@ class BaseStaticFileHandler(StaticFileHandler): return super(BaseStaticFileHandler, self).write_error(status_code, **kwargs) def validate_absolute_path(self, root, absolute_path): - if '\\images\\flags\\' in absolute_path and not ek.ek(os.path.isfile, absolute_path): + if '\\images\\flags\\' in absolute_path and not os.path.isfile(absolute_path): absolute_path = re.sub(r'\\[^\\]+\.png$', '\\\\unknown.png', absolute_path) return super(BaseStaticFileHandler, self).validate_absolute_path(root, absolute_path) @@ -281,10 +279,10 @@ class BaseHandler(RouteHandler): return True def get_image(self, image): - if ek.ek(os.path.isfile, image): + if os.path.isfile(image): mime_type, encoding = MimeTypes().guess_type(image) self.set_header('Content-Type', mime_type) - with ek.ek(open, image, 'rb') as img: + with open(image, 'rb') as img: return img.read() def show_poster(self, tvid_prodid=None, which=None, 
api=None): @@ -316,19 +314,19 @@ class BaseHandler(RouteHandler): ('%s' % (re.sub(r'.*?fanart_(\d+(?:\.\w{1,20})?\.\w{5,8}).*', r'\1.', which, 0, re.I)),))] for cur_name in image_file_name: - if ek.ek(os.path.isfile, cur_name): + if os.path.isfile(cur_name): static_image_path = cur_name break if api: - used_file = ek.ek(os.path.basename, static_image_path) + used_file = os.path.basename(static_image_path) if static_image_path.startswith('/images'): used_file = 'default' - static_image_path = ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', 'slick', static_image_path[1:]) + static_image_path = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', static_image_path[1:]) mime_type, encoding = MimeTypes().guess_type(static_image_path) self.set_header('Content-Type', mime_type) self.set_header('X-Filename', used_file) - with ek.ek(open, static_image_path, 'rb') as img: + with open(static_image_path, 'rb') as img: return img.read() else: static_image_path = os.path.normpath(static_image_path.replace(sickgear.CACHE_DIR, '/cache')) @@ -472,37 +470,37 @@ class RepoHandler(BaseStaticFileHandler): super(RepoHandler, self).initialize(*args, **kwargs) logger.log('Kodi req... initialize(path): %s' % kwargs['path'], logger.DEBUG) - cache_client = ek.ek(os.path.join, sickgear.CACHE_DIR, 'clients') - cache_client_kodi = ek.ek(os.path.join, cache_client, 'kodi') - cache_client_kodi_watchedstate = ek.ek(os.path.join, cache_client_kodi, 'service.sickgear.watchedstate.updater') + cache_client = os.path.join(sickgear.CACHE_DIR, 'clients') + cache_client_kodi = os.path.join(cache_client, 'kodi') + cache_client_kodi_watchedstate = os.path.join(cache_client_kodi, 'service.sickgear.watchedstate.updater') - cache_resources = ek.ek(os.path.join, cache_client_kodi_watchedstate, 'resources') - cache_lang = ek.ek(os.path.join, cache_resources, 'language') - cache_other_lang = ek.ek(os.path.join, cache_lang, ('English', 'resource.language.en_gb')[self.kodi_is_legacy]) - ek.ek(os.path.exists, cache_other_lang) and remove_file(cache_other_lang, tree=True) + cache_resources = os.path.join(cache_client_kodi_watchedstate, 'resources') + cache_lang = os.path.join(cache_resources, 'language') + cache_other_lang = os.path.join(cache_lang, ('English', 'resource.language.en_gb')[self.kodi_is_legacy]) + os.path.exists(cache_other_lang) and remove_file(cache_other_lang, tree=True) - cache_lang_sub = ek.ek(os.path.join, cache_lang, ('resource.language.en_gb', 'English')[self.kodi_is_legacy]) + cache_lang_sub = os.path.join(cache_lang, ('resource.language.en_gb', 'English')[self.kodi_is_legacy]) for folder in (cache_client, cache_client_kodi, - ek.ek(os.path.join, cache_client_kodi, 'repository.sickgear'), + os.path.join(cache_client_kodi, 'repository.sickgear'), cache_client_kodi_watchedstate, - ek.ek(os.path.join, cache_resources), + os.path.join(cache_resources), cache_lang, cache_lang_sub, ): - if not ek.ek(os.path.exists, folder): - ek.ek(os.mkdir, folder) + if not os.path.exists(folder): + os.mkdir(folder) - with io.open(ek.ek(os.path.join, cache_client_kodi, 'index.html'), 'w') as fh: + with io.open(os.path.join(cache_client_kodi, 'index.html'), 'w') as fh: fh.write(self.render_kodi_index()) - with io.open(ek.ek(os.path.join, cache_client_kodi, 'repository.sickgear', 'index.html'), 'w') as fh: + with io.open(os.path.join(cache_client_kodi, 'repository.sickgear', 'index.html'), 'w') as fh: fh.write(self.render_kodi_repository_sickgear_index()) - with io.open(ek.ek(os.path.join, cache_client_kodi_watchedstate, 'index.html'), 'w') as fh: + 
with io.open(os.path.join(cache_client_kodi_watchedstate, 'index.html'), 'w') as fh: fh.write(self.render_kodi_service_sickgear_watchedstate_updater_index()) - with io.open(ek.ek(os.path.join, cache_resources, 'index.html'), 'w') as fh: + with io.open(os.path.join(cache_resources, 'index.html'), 'w') as fh: fh.write(self.render_kodi_service_sickgear_watchedstate_updater_resources_index()) - with io.open(ek.ek(os.path.join, cache_lang, 'index.html'), 'w') as fh: + with io.open(os.path.join(cache_lang, 'index.html'), 'w') as fh: fh.write(self.render_kodi_service_sickgear_watchedstate_updater_resources_language_index()) - with io.open(ek.ek(os.path.join, cache_lang_sub, 'index.html'), 'w') as fh: + with io.open(os.path.join(cache_lang_sub, 'index.html'), 'w') as fh: fh.write(self.render_kodi_service_sickgear_watchedstate_updater_resources_language_english_index()) ''' @@ -511,7 +509,7 @@ class RepoHandler(BaseStaticFileHandler): if repo rendered md5 changes or flag is true, update the repo addon, where repo version *must* be increased ''' - repo_md5_file = ek.ek(os.path.join, cache_client_kodi, 'addons.xml.md5') + repo_md5_file = os.path.join(cache_client_kodi, 'addons.xml.md5') saved_md5 = None try: with io.open(repo_md5_file, 'r', encoding='utf8') as fh: @@ -520,18 +518,18 @@ class RepoHandler(BaseStaticFileHandler): pass rendered_md5 = self.render_kodi_repo_addons_xml_md5() if saved_md5 != rendered_md5: - with io.open(ek.ek(os.path.join, cache_client_kodi, 'repository.sickgear', 'addon.xml'), 'w') as fh: + with io.open(os.path.join(cache_client_kodi, 'repository.sickgear', 'addon.xml'), 'w') as fh: fh.write(self.render_kodi_repo_addon_xml()) - with io.open(ek.ek(os.path.join, cache_client_kodi_watchedstate, 'addon.xml'), 'w') as fh: + with io.open(os.path.join(cache_client_kodi_watchedstate, 'addon.xml'), 'w') as fh: fh.write(self.get_watchedstate_updater_addon_xml()) - with io.open(ek.ek(os.path.join, cache_client_kodi, 'addons.xml'), 'w') as fh: + with io.open(os.path.join(cache_client_kodi, 'addons.xml'), 'w') as fh: fh.write(self.render_kodi_repo_addons_xml()) - with io.open(ek.ek(os.path.join, cache_client_kodi, 'addons.xml.md5'), 'w') as fh: + with io.open(os.path.join(cache_client_kodi, 'addons.xml.md5'), 'w') as fh: fh.write(rendered_md5) def save_zip(name, version, zip_path, zip_method): zip_name = '%s-%s.zip' % (name, version) - zip_file = ek.ek(os.path.join, zip_path, zip_name) + zip_file = os.path.join(zip_path, zip_name) for direntry in helpers.scantree(zip_path, ['resources'], [r'\.(?:md5|zip)$'], filter_kind=False): remove_file_perm(direntry.path) zip_data = zip_method() @@ -539,11 +537,11 @@ class RepoHandler(BaseStaticFileHandler): zh.write(zip_data) # Force a UNIX line ending, like the md5sum utility. 
- with io.open(ek.ek(os.path.join, zip_path, '%s.md5' % zip_name), 'w', newline='\n') as zh: + with io.open(os.path.join(zip_path, '%s.md5' % zip_name), 'w', newline='\n') as zh: zh.write(u'%s *%s\n' % (self.md5ify(zip_data), zip_name)) aid, ver = self.repo_sickgear_details() - save_zip(aid, ver, ek.ek(os.path.join, cache_client_kodi, 'repository.sickgear'), + save_zip(aid, ver, os.path.join(cache_client_kodi, 'repository.sickgear'), self.kodi_repository_sickgear_zip) aid, ver = self.addon_watchedstate_details() @@ -566,8 +564,8 @@ class RepoHandler(BaseStaticFileHandler): (cache_lang_sub, 'strings.xml') ))[self.kodi_is_legacy], ): - helpers.copy_file(ek.ek( - os.path.join, *(sickgear.PROG_DIR, 'sickgear', 'clients', 'kodi') + src), ek.ek(os.path.join, *dst)) + helpers.copy_file( + os.path.join(*(sickgear.PROG_DIR, 'sickgear', 'clients', 'kodi') + src), os.path.join(*dst)) def get_content_type(self): if '.md5' == self.absolute_path[-4:] or '.po' == self.absolute_path[-3:]: @@ -583,7 +581,7 @@ class RepoHandler(BaseStaticFileHandler): t.addon = '%s-%s.zip' % self.addon_watchedstate_details() try: - with open(ek.ek(os.path.join, sickgear.PROG_DIR, 'CHANGES.md')) as fh: + with open(os.path.join(sickgear.PROG_DIR, 'CHANGES.md')) as fh: t.version = re.findall(r'###[^0-9x]+([0-9]+\.[0-9]+\.[0-9x]+)', fh.readline())[0] except (BaseException, Exception): t.version = '' @@ -640,8 +638,8 @@ class RepoHandler(BaseStaticFileHandler): return sickgear.MEMCACHE.get(mem_key).get('data') filename = 'addon%s.xml' % self.kodi_include - with io.open(ek.ek(os.path.join, sickgear.PROG_DIR, 'sickgear', 'clients', - 'kodi', 'service.sickgear.watchedstate.updater', filename), 'r', encoding='utf8') as fh: + with io.open(os.path.join(sickgear.PROG_DIR, 'sickgear', 'clients', 'kodi', + 'service.sickgear.watchedstate.updater', filename), 'r', encoding='utf8') as fh: xml = fh.read().strip() % dict(ADDON_VERSION=self.get_addon_version(self.kodi_include)) sickgear.MEMCACHE[mem_key] = dict(last_update=30 + int(timestamp_near(datetime.datetime.now())), data=xml) @@ -662,8 +660,8 @@ class RepoHandler(BaseStaticFileHandler): return sickgear.MEMCACHE.get(mem_key).get('data') filename = 'service%s.py' % kodi_include - with io.open(ek.ek(os.path.join, sickgear.PROG_DIR, 'sickgear', 'clients', - 'kodi', 'service.sickgear.watchedstate.updater', filename), 'r', encoding='utf8') as fh: + with io.open(os.path.join(sickgear.PROG_DIR, 'sickgear', 'clients', 'kodi', + 'service.sickgear.watchedstate.updater', filename), 'r', encoding='utf8') as fh: version = re.findall(r'ADDON_VERSION\s*?=\s*?\'([^\']+)', fh.read())[0] sickgear.MEMCACHE[mem_key] = dict(last_update=30 + int(timestamp_near(datetime.datetime.now())), data=version) @@ -705,8 +703,8 @@ class RepoHandler(BaseStaticFileHandler): with zipfile.ZipFile(bfr, 'w') as zh: zh.writestr('repository.sickgear/addon.xml', self.render_kodi_repo_addon_xml(), zipfile.ZIP_DEFLATED) - with io.open(ek.ek(os.path.join, sickgear.PROG_DIR, - 'sickgear', 'clients', 'kodi', 'repository.sickgear', 'icon.png'), 'rb') as fh: + with io.open(os.path.join(sickgear.PROG_DIR, 'sickgear', 'clients', 'kodi', + 'repository.sickgear', 'icon.png'), 'rb') as fh: infile = fh.read() zh.writestr('repository.sickgear/icon.png', infile, zipfile.ZIP_DEFLATED) except OSError as e: @@ -719,12 +717,12 @@ class RepoHandler(BaseStaticFileHandler): def kodi_service_sickgear_watchedstate_updater_zip(self): bfr = io.BytesIO() - basepath = ek.ek(os.path.join, sickgear.PROG_DIR, 'sickgear', 'clients', 'kodi') + basepath = 
os.path.join(sickgear.PROG_DIR, 'sickgear', 'clients', 'kodi') - zip_path = ek.ek(os.path.join, basepath, 'service.sickgear.watchedstate.updater') - devenv_src = ek.ek(os.path.join, sickgear.PROG_DIR, 'tests', '_devenv.py') - devenv_dst = ek.ek(os.path.join, zip_path, '_devenv.py') - if sickgear.ENV.get('DEVENV') and ek.ek(os.path.exists, devenv_src): + zip_path = os.path.join(basepath, 'service.sickgear.watchedstate.updater') + devenv_src = os.path.join(sickgear.PROG_DIR, 'tests', '_devenv.py') + devenv_dst = os.path.join(zip_path, '_devenv.py') + if sickgear.ENV.get('DEVENV') and os.path.exists(devenv_src): helpers.copy_file(devenv_src, devenv_dst) else: helpers.remove_file_perm(devenv_dst) @@ -746,7 +744,7 @@ class RepoHandler(BaseStaticFileHandler): infile = fh.read() with zipfile.ZipFile(bfr, 'a') as zh: - zh.writestr(ek.ek(os.path.relpath, direntry.path.replace(self.kodi_legacy, ''), basepath), + zh.writestr(os.path.relpath(direntry.path.replace(self.kodi_legacy, ''), basepath), infile, zipfile.ZIP_DEFLATED) except OSError as e: logger.log('Unable to zip %s: %r / %s' % (direntry.path, e, ex(e)), logger.WARNING) @@ -890,7 +888,7 @@ class LogfileHandler(BaseHandler): self.set_header('Content-Type', 'text/html; charset=utf-8') self.set_header('Content-Description', 'Logfile Download') self.set_header('Content-Disposition', 'attachment; filename=sickgear.log') - # self.set_header('Content-Length', ek.ek(os.path.getsize, logfile_name)) + # self.set_header('Content-Length', os.path.getsize(logfile_name)) auths = sickgear.GenericProvider.dedupe_auths(True) rxc_auths = re.compile('(?i)%s' % '|'.join([(re.escape(_a)) for _a in auths])) replacements = dict([(_a, starify(_a)) for _a in auths]) @@ -1192,7 +1190,7 @@ class MainHandler(WebHandler): if tvid_prodid in fanarts: continue - for img in ek.ek(glob.glob, cache_obj.fanart_path(*tvid_prodid_obj.tuple).replace('fanart.jpg', '*')) or []: + for img in glob.glob(cache_obj.fanart_path(*tvid_prodid_obj.tuple).replace('fanart.jpg', '*')) or []: match = re.search(r'(\d+(?:\.\w*)?\.\w{5,8})\.fanart\.', img, re.I) if not match: continue @@ -1276,8 +1274,8 @@ class MainHandler(WebHandler): elif 'backart' in kwargs: sickgear.EPISODE_VIEW_BACKGROUND = backart sickgear.FANART_PANEL = 'highlight-off' == sickgear.FANART_PANEL and 'highlight-off' or \ - 'highlight2' == sickgear.FANART_PANEL and 'highlight1' or \ - 'highlight1' == sickgear.FANART_PANEL and 'highlight' or 'highlight-off' + 'highlight2' == sickgear.FANART_PANEL and 'highlight1' or \ + 'highlight1' == sickgear.FANART_PANEL and 'highlight' or 'highlight-off' elif 'viewmode' in kwargs: sickgear.EPISODE_VIEW_VIEWMODE = viewmode @@ -1395,7 +1393,7 @@ r.close() if data: my_db = db.DBConnection(row_type='dict') - media_paths = map_list(lambda arg: ek.ek(os.path.basename, arg[1]['path_file']), iteritems(data)) + media_paths = map_list(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(data)) def chunks(lines, n): for c in range(0, len(lines), n): @@ -1412,13 +1410,13 @@ r.close() cl = [] ep_results = {} - map_consume(lambda r: ep_results.update({'%s' % ek.ek(os.path.basename, r['location']).lower(): dict( + map_consume(lambda r: ep_results.update({'%s' % os.path.basename(r['location']).lower(): dict( episode_id=r['episode_id'], status=r['status'], location=r['location'], file_size=r['file_size'])}), sql_result) for (k, v) in iteritems(data): - bname = (ek.ek(os.path.basename, v.get('path_file')) or '').lower() + bname = (os.path.basename(v.get('path_file')) or '').lower() if not bname: msg = 
'Missing media file name provided' data[k] = msg @@ -1581,15 +1579,15 @@ class Home(MainHandler): if 'simple' != sickgear.HOME_LAYOUT: t.network_images = {} networks = {} - images_path = ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', 'slick', 'images', 'network') + images_path = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', 'network') for cur_show_obj in sickgear.showList: network_name = 'nonetwork' if None is cur_show_obj.network \ else cur_show_obj.network.replace(u'\u00C9', 'e').lower() if network_name not in networks: filename = u'%s.png' % network_name - if not ek.ek(os.path.isfile, ek.ek(os.path.join, images_path, filename)): + if not os.path.isfile(os.path.join(images_path, filename)): filename = u'%s.png' % re.sub(r'(?m)(.*)\s+\(\w{2}\)$', r'\1', network_name) - if not ek.ek(os.path.isfile, ek.ek(os.path.join, images_path, filename)): + if not os.path.isfile(os.path.join(images_path, filename)): filename = u'nonetwork.png' networks.setdefault(network_name, filename) t.network_images.setdefault(cur_show_obj.tvid_prodid, networks[network_name]) @@ -2141,25 +2139,25 @@ class Home(MainHandler): show_message = [] - if sickgear.show_queue_scheduler.action.isBeingAdded(show_obj): + if sickgear.show_queue_scheduler.action.is_being_added(show_obj): show_message = ['Downloading this show, the information below is incomplete'] - elif sickgear.show_queue_scheduler.action.isBeingUpdated(show_obj): + elif sickgear.show_queue_scheduler.action.is_being_updated(show_obj): show_message = ['Updating information for this show'] - elif sickgear.show_queue_scheduler.action.isBeingRefreshed(show_obj): + elif sickgear.show_queue_scheduler.action.is_being_refreshed(show_obj): show_message = ['Refreshing episodes from disk for this show'] - elif sickgear.show_queue_scheduler.action.isBeingSubtitled(show_obj): + elif sickgear.show_queue_scheduler.action.is_being_subtitled(show_obj): show_message = ['Downloading subtitles for this show'] - elif sickgear.show_queue_scheduler.action.isInRefreshQueue(show_obj): + elif sickgear.show_queue_scheduler.action.is_in_refresh_queue(show_obj): show_message = ['Refresh queued for this show'] - elif sickgear.show_queue_scheduler.action.isInUpdateQueue(show_obj): + elif sickgear.show_queue_scheduler.action.is_in_update_queue(show_obj): show_message = ['Update queued for this show'] - elif sickgear.show_queue_scheduler.action.isInSubtitleQueue(show_obj): + elif sickgear.show_queue_scheduler.action.is_in_subtitle_queue(show_obj): show_message = ['Subtitle download queued for this show'] if sickgear.show_queue_scheduler.action.is_show_being_switched(show_obj): @@ -2185,8 +2183,8 @@ class Home(MainHandler): show_message = '.
'.join(show_message) t.force_update = 'home/update-show?tvid_prodid=%s&force=1&web=1' % tvid_prodid - if not sickgear.show_queue_scheduler.action.isBeingAdded(show_obj): - if not sickgear.show_queue_scheduler.action.isBeingUpdated(show_obj): + if not sickgear.show_queue_scheduler.action.is_being_added(show_obj): + if not sickgear.show_queue_scheduler.action.is_being_updated(show_obj): t.submenu.append( {'title': 'Remove', 'path': 'home/delete-show?tvid_prodid=%s' % tvid_prodid, 'confirm': True}) @@ -2211,7 +2209,7 @@ class Home(MainHandler): t.submenu.append( {'title': 'Media Rename', 'path': 'home/rename-media?tvid_prodid=%s' % tvid_prodid}) - if sickgear.USE_SUBTITLES and not sickgear.show_queue_scheduler.action.isBeingSubtitled( + if sickgear.USE_SUBTITLES and not sickgear.show_queue_scheduler.action.is_being_subtitled( show_obj) and show_obj.subtitles: t.submenu.append( {'title': 'Download Subtitles', @@ -2355,8 +2353,7 @@ class Home(MainHandler): t.fanart = [] cache_obj = image_cache.ImageCache() - for img in ek.ek(glob.glob, - cache_obj.fanart_path(show_obj.tvid, show_obj.prodid).replace('fanart.jpg', '*')) or []: + for img in glob.glob(cache_obj.fanart_path(show_obj.tvid, show_obj.prodid).replace('fanart.jpg', '*')) or []: match = re.search(r'(\d+(?:\.(\w*?(\d*)))?\.\w{5,8})\.fanart\.', img, re.I) if match and match.group(1): t.fanart += [(match.group(1), @@ -2544,8 +2541,8 @@ class Home(MainHandler): show_obj = helpers.find_show_by_id({tvid: prodid}, no_mapped_ids=True) try: sickgear.show_queue_scheduler.action.switch_show(show_obj=show_obj, new_tvid=m_tvid, - new_prodid=m_prodid, force_id=True, - set_pause=set_pause, mark_wanted=mark_wanted) + new_prodid=m_prodid, force_id=True, + set_pause=set_pause, mark_wanted=mark_wanted) except (BaseException, Exception) as e: logger.log('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e)), logger.WARNING) @@ -2666,7 +2663,7 @@ class Home(MainHandler): t.fanart = [] cache_obj = image_cache.ImageCache() show_obj = getattr(t, 'show_obj', None) or getattr(t, 'show', None) - for img in ek.ek(glob.glob, cache_obj.fanart_path( + for img in glob.glob(cache_obj.fanart_path( show_obj.tvid, show_obj.prodid).replace('fanart.jpg', '*')) or []: match = re.search(r'(\d+(?:\.(\w*?(\d*)))?\.\w{5,8})\.fanart\.', img, re.I) if match and match.group(1): @@ -2849,7 +2846,7 @@ class Home(MainHandler): if bool(show_obj.flatten_folders) != bool(flatten_folders): show_obj.flatten_folders = flatten_folders try: - sickgear.show_queue_scheduler.action.refreshShow(show_obj) + sickgear.show_queue_scheduler.action.refresh_show(show_obj) except exceptions_helper.CantRefreshException as e: errors.append('Unable to refresh this show: ' + ex(e)) @@ -2894,11 +2891,11 @@ class Home(MainHandler): # if we change location clear the db of episodes, change it, write to db, and rescan # noinspection PyProtectedMember - old_path = ek.ek(os.path.normpath, show_obj._location) - new_path = ek.ek(os.path.normpath, location) + old_path = os.path.normpath(show_obj._location) + new_path = os.path.normpath(location) if old_path != new_path: logger.log(u'%s != %s' % (old_path, new_path), logger.DEBUG) - if not ek.ek(os.path.isdir, new_path) and not sickgear.CREATE_MISSING_SHOW_DIRS: + if not os.path.isdir(new_path) and not sickgear.CREATE_MISSING_SHOW_DIRS: errors.append(u'New location %s does not exist' % new_path) # don't bother if we're going to update anyway @@ -2907,7 +2904,7 @@ class Home(MainHandler): try: show_obj.location = new_path try: - 
sickgear.show_queue_scheduler.action.refreshShow(show_obj) + sickgear.show_queue_scheduler.action.refresh_show(show_obj) except exceptions_helper.CantRefreshException as e: errors.append('Unable to refresh this show:' + ex(e)) # grab updated info from TVDB @@ -2924,7 +2921,7 @@ class Home(MainHandler): # force the update if do_update: try: - sickgear.show_queue_scheduler.action.updateShow(show_obj, True) + sickgear.show_queue_scheduler.action.update_show(show_obj, True) helpers.cpu_sleep() except exceptions_helper.CantUpdateException: errors.append('Unable to force an update on the show.') @@ -2962,8 +2959,8 @@ class Home(MainHandler): if None is show_obj: return self._generic_message('Error', 'Unable to find the specified show') - if sickgear.show_queue_scheduler.action.isBeingAdded( - show_obj) or sickgear.show_queue_scheduler.action.isBeingUpdated(show_obj): + if sickgear.show_queue_scheduler.action.is_being_added( + show_obj) or sickgear.show_queue_scheduler.action.is_being_updated(show_obj): return self._generic_message("Error", "Shows can't be deleted while they're being added or updated.") # if sickgear.USE_TRAKT and sickgear.TRAKT_SYNC: @@ -3008,7 +3005,7 @@ class Home(MainHandler): # force the update from the DB try: - sickgear.show_queue_scheduler.action.refreshShow(show_obj) + sickgear.show_queue_scheduler.action.refresh_show(show_obj) except exceptions_helper.CantRefreshException as e: ui.notifications.error('Unable to refresh this show.', ex(e)) @@ -3028,7 +3025,7 @@ class Home(MainHandler): # force the update try: - sickgear.show_queue_scheduler.action.updateShow(show_obj, bool(force), bool(web)) + sickgear.show_queue_scheduler.action.update_show(show_obj, bool(force), bool(web)) except exceptions_helper.CantUpdateException as e: ui.notifications.error('Unable to update this show.', ex(e)) @@ -3153,7 +3150,7 @@ class Home(MainHandler): elif status in Quality.DOWNLOADED \ and ep_obj.status not in required + Quality.ARCHIVED + [IGNORED, SKIPPED] \ - and not ek.ek(os.path.isfile, ep_obj.location): + and not os.path.isfile(ep_obj.location): err_msg = 'to downloaded because it\'s not snatched/downloaded/archived' if err_msg: @@ -4061,7 +4058,7 @@ class AddShows(Home): any(ids_to_search[si] == results[cur_tvid][tv_src_id].get('ids', {})[si] for si in ids_to_search): ids_search_used.update({k: v for k, v in iteritems( - results[cur_tvid][tv_src_id].get('ids',{})) + results[cur_tvid][tv_src_id].get('ids', {})) if v and k not in iterkeys(ids_to_search)}) results[cur_tvid][tv_src_id]['rename_suggest'] = '' \ if not results[cur_tvid][tv_src_id]['firstaired'] \ @@ -4110,7 +4107,8 @@ class AddShows(Home): show['seriesname'], helpers.xhtml_escape(show['seriesname']), show['firstaired'], (isinstance(show['firstaired'], string_types) and SGDatetime.sbfdate(_parse_date(show['firstaired'])) or ''), - show.get('network', '') or '', (show.get('genres', '') or show.get('genre', '') or '').replace('|', ', '), # 11 - 12 + show.get('network', '') or '', # 11 + (show.get('genres', '') or show.get('genre', '') or '').replace('|', ', '), # 12 show.get('language', ''), show.get('language_country_code') or '', # 13 - 14 re.sub(r'([,.!][^,.!]*?)$', '...', re.sub(r'([.!?])(?=\w)', r'\1 ', @@ -4275,7 +4273,7 @@ class AddShows(Home): try: for cur_dir in scantree(cur_root_dir, filter_kind=True, recurse=False): - normpath = ek.ek(os.path.normpath, cur_dir.path) + normpath = os.path.normpath(cur_dir.path) highlight = hash_dir == re.sub('[^a-z]', '', sg_helpers.md5_for_text(normpath)) if hash_dir: 
display_one_dir = highlight @@ -4318,7 +4316,7 @@ class AddShows(Home): if display_one_dir and not cur_data['highlight'][cur_enum]: continue - dir_item = dict(normpath=cur_normpath, rootpath='%s%s' % (ek.ek(os.path.dirname, cur_normpath), os.sep), + dir_item = dict(normpath=cur_normpath, rootpath='%s%s' % (os.path.dirname(cur_normpath), os.sep), name=cur_data['name'][cur_enum], added_already=any(cur_data['exists'][cur_enum]), highlight=cur_data['highlight'][cur_enum]) @@ -4330,7 +4328,7 @@ class AddShows(Home): if prodid and show_name: break - (tvid, prodid, show_name) = cur_provider.retrieveShowMetadata(cur_normpath) + (tvid, prodid, show_name) = cur_provider.retrieve_show_metadata(cur_normpath) # default to TVDB if TV info src was not detected if show_name and (not tvid or not prodid): @@ -4376,7 +4374,7 @@ class AddShows(Home): elif not show_dir: t.default_show_name = '' elif not show_name: - t.default_show_name = ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.', ' ') + t.default_show_name = os.path.basename(os.path.normpath(show_dir)).replace('.', ' ') else: t.default_show_name = show_name @@ -5948,7 +5946,7 @@ class AddShows(Home): tvid, void, prodid, show_name = self.split_extra_show(which_series) if bool(helpers.try_int(cancel_form)): tvid = tvid or provided_tvid or '0' - prodid = re.findall(r'tvid_prodid=[^%s]+%s([\d]+)' % tuple(2 * [TVidProdid.glue]), return_to)[0] + prodid = re.findall(r'tvid_prodid=[^%s]+%s(\d+)' % tuple(2 * [TVidProdid.glue]), return_to)[0] return self.redirect(return_to % (tvid, prodid)) # grab our list of other dirs if given @@ -6001,14 +5999,14 @@ class AddShows(Home): # use the whole path if it's given, or else append the show name to the root dir to get the full show path if full_show_path: - show_dir = ek.ek(os.path.normpath, full_show_path) + show_dir = os.path.normpath(full_show_path) new_show = False else: show_dir = helpers.generate_show_dir_name(root_dir, show_name) new_show = True # if the dir exists, do 'add existing show' - if ek.ek(os.path.isdir, show_dir) and not full_show_path: + if os.path.isdir(show_dir) and not full_show_path: ui.notifications.error('Unable to add show', u'Found existing folder: ' + show_dir) return self.redirect( '/add-shows/import?tvid_prodid=%s%s%s&hash_dir=%s%s' % @@ -6691,7 +6689,7 @@ class Manage(MainHandler): for cur_show_obj in show_list: # noinspection PyProtectedMember - cur_root_dir = ek.ek(os.path.dirname, cur_show_obj._location) + cur_root_dir = os.path.dirname(cur_show_obj._location) if cur_root_dir not in root_dir_list: root_dir_list.append(cur_root_dir) @@ -6817,11 +6815,11 @@ class Manage(MainHandler): continue # noinspection PyProtectedMember - cur_root_dir = ek.ek(os.path.dirname, show_obj._location) + cur_root_dir = os.path.dirname(show_obj._location) # noinspection PyProtectedMember - cur_show_dir = ek.ek(os.path.basename, show_obj._location) + cur_show_dir = os.path.basename(show_obj._location) if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]: - new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir) + new_show_dir = os.path.join(dir_map[cur_root_dir], cur_show_dir) if 'nt' != os.name and ':\\' in cur_show_dir: # noinspection PyProtectedMember cur_show_dir = show_obj._location.split('\\')[-1] @@ -6829,7 +6827,7 @@ class Manage(MainHandler): base_dir = dir_map[cur_root_dir].rsplit(cur_show_dir)[0].rstrip('/') except IndexError: base_dir = dir_map[cur_root_dir] - new_show_dir = ek.ek(os.path.join, base_dir, cur_show_dir) + new_show_dir = 
os.path.join(base_dir, cur_show_dir) # noinspection PyProtectedMember logger.log(u'For show %s changing dir from %s to %s' % (show_obj.unique_name, show_obj._location, new_show_dir)) @@ -6945,20 +6943,20 @@ class Manage(MainHandler): else: if cur_tvid_prodid in to_update: try: - sickgear.show_queue_scheduler.action.updateShow(cur_show_obj, True, True) + sickgear.show_queue_scheduler.action.update_show(cur_show_obj, True, True) update.append(cur_show_obj.name) except exceptions_helper.CantUpdateException as e: errors.append('Unable to update show %s: %s' % (cur_show_obj.unique_name, ex(e))) elif cur_tvid_prodid in to_refresh: try: - sickgear.show_queue_scheduler.action.refreshShow(cur_show_obj) + sickgear.show_queue_scheduler.action.refresh_show(cur_show_obj) refresh.append(cur_show_obj.name) except exceptions_helper.CantRefreshException as e: errors.append('Unable to refresh show %s: %s' % (cur_show_obj.unique_name, ex(e))) if cur_tvid_prodid in to_rename: - sickgear.show_queue_scheduler.action.renameShowEpisodes(cur_show_obj) + sickgear.show_queue_scheduler.action.rename_show_episodes(cur_show_obj) rename.append(cur_show_obj.name) if sickgear.USE_SUBTITLES and cur_tvid_prodid in to_subtitle: @@ -7067,7 +7065,7 @@ class Manage(MainHandler): continue try: sickgear.show_queue_scheduler.action.switch_show(show_obj=show_obj, new_tvid=new_tvid, - new_prodid=new_prodid, force_id=force_id) + new_prodid=new_prodid, force_id=force_id) except (BaseException, Exception) as e: logger.log('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e)), logger.WARNING) errors.append('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e))) @@ -7172,7 +7170,7 @@ class ShowTasks(Manage): t.people_queue = sickgear.people_queue_scheduler.action.queue_data() t.next_run = sickgear.show_update_scheduler.lastRun.replace( hour=sickgear.show_update_scheduler.start_time.hour) - t.show_update_running = sickgear.show_queue_scheduler.action.isShowUpdateRunning() \ + t.show_update_running = sickgear.show_queue_scheduler.action.is_show_update_running() \ or sickgear.show_update_scheduler.action.amActive my_db = db.DBConnection(row_type='dict') @@ -7613,8 +7611,7 @@ class History(MainHandler): rd = sickgear.ROOT_DIRS.split('|')[1:] \ + [x.split('=')[0] for x in sickgear.EMBY_PARENT_MAPS.split(',') if any(x)] - rootpaths = sorted( - ['%s%s' % (ek.ek(os.path.splitdrive, x)[1], os.path.sep) for x in rd], key=len, reverse=True) + rootpaths = sorted(['%s%s' % (os.path.splitdrive(x)[1], os.path.sep) for x in rd], key=len, reverse=True) rootdirs = sorted([x for x in rd], key=len, reverse=True) headers = {'Content-type': 'application/json'} states = {} @@ -7667,8 +7664,8 @@ class History(MainHandler): continue for index, p in enumerate(rootpaths): if p in path_file: - path_file = ek.ek(os.path.join, rootdirs[index], - re.sub('.*?%s' % re.escape(p), '', path_file)) + path_file = os.path.join( + rootdirs[index], re.sub('.*?%s' % re.escape(p), '', path_file)) root_dir_found = True break if not root_dir_found: @@ -7701,11 +7698,11 @@ class History(MainHandler): if states: # Prune user removed items that are no longer being returned by API - media_paths = map_list(lambda arg: ek.ek(os.path.basename, arg[1]['path_file']), iteritems(states)) + media_paths = map_list(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(states)) sql = 'FROM tv_episodes_watched WHERE hide=1 AND label LIKE "%%{Emby}"' my_db = db.DBConnection(row_type='dict') files = my_db.select('SELECT location %s' % sql) - for i in 
filter_iter(lambda f: ek.ek(os.path.basename, f['location']) not in media_paths, files): + for i in filter_iter(lambda f: os.path.basename(f['location']) not in media_paths, files): loc = i.get('location') if loc: my_db.select('DELETE %s AND location="%s"' % (sql, loc)) @@ -7770,11 +7767,11 @@ class History(MainHandler): if states: # Prune user removed items that are no longer being returned by API - media_paths = map_list(lambda arg: ek.ek(os.path.basename, arg[1]['path_file']), iteritems(states)) + media_paths = map_list(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(states)) sql = 'FROM tv_episodes_watched WHERE hide=1 AND label LIKE "%%{Plex}"' my_db = db.DBConnection(row_type='dict') files = my_db.select('SELECT location %s' % sql) - for i in filter_iter(lambda f: ek.ek(os.path.basename, f['location']) not in media_paths, files): + for i in filter_iter(lambda f: os.path.basename(f['location']) not in media_paths, files): loc = i.get('location') if loc: my_db.select('DELETE %s AND location="%s"' % (sql, loc)) @@ -7812,7 +7809,7 @@ class History(MainHandler): refresh = [] for cur_result in sql_result: if files and cur_result['location'] not in attempted and 0 < helpers.get_size(cur_result['location']) \ - and ek.ek(os.path.isfile, cur_result['location']): + and os.path.isfile(cur_result['location']): # locations repeat with watch events but attempt to delete once attempted += [cur_result['location']] @@ -7855,7 +7852,7 @@ class History(MainHandler): for tvid_prodid_dict in refresh: try: - sickgear.show_queue_scheduler.action.refreshShow( + sickgear.show_queue_scheduler.action.refresh_show( helpers.find_show_by_id(tvid_prodid_dict)) except (BaseException, Exception): pass @@ -7899,7 +7896,7 @@ class Config(MainHandler): t.submenu = self.config_menu() try: - with open(ek.ek(os.path.join, sickgear.PROG_DIR, 'CHANGES.md')) as fh: + with open(os.path.join(sickgear.PROG_DIR, 'CHANGES.md')) as fh: t.version = re.findall(r'###[^0-9]+([0-9]+\.[0-9]+\.[0-9x]+)', fh.readline())[0] except (BaseException, Exception): t.version = '' @@ -7909,18 +7906,18 @@ class Config(MainHandler): t.tz_version = None try: if None is not current_file: - current_file = ek.ek(os.path.basename, current_file) - zonefile = real_path(ek.ek(os.path.join, sickgear.ZONEINFO_DIR, current_file)) - if not ek.ek(os.path.isfile, zonefile): + current_file = os.path.basename(current_file) + zonefile = real_path(os.path.join(sickgear.ZONEINFO_DIR, current_file)) + if not os.path.isfile(zonefile): t.tz_fallback = True - zonefile = ek.ek(os.path.join, ek.ek(os.path.dirname, zoneinfo.__file__), current_file) - if ek.ek(os.path.isfile, zonefile): + zonefile = os.path.join(os.path.dirname(zoneinfo.__file__), current_file) + if os.path.isfile(zonefile): t.tz_version = zoneinfo.ZoneInfoFile(zoneinfo.getzoneinfofile_stream()).metadata['tzversion'] except (BaseException, Exception): pass t.backup_db_path = sickgear.BACKUP_DB_MAX_COUNT and \ - (sickgear.BACKUP_DB_PATH or ek.ek(os.path.join, sickgear.DATA_DIR, 'backup')) or 'Disabled' + (sickgear.BACKUP_DB_PATH or os.path.join(sickgear.DATA_DIR, 'backup')) or 'Disabled' return t.respond() @@ -8058,7 +8055,7 @@ class ConfigGeneral(Config): best_qualities = ([], best_qualities.split(','))[any(best_qualities)] sickgear.QUALITY_DEFAULT = int(Quality.combineQualities(map_list(int, any_qualities), - map_list(int, best_qualities))) + map_list(int, best_qualities))) sickgear.WANTED_BEGIN_DEFAULT = config.minimax(default_wanted_begin, 0, -1, 10) sickgear.WANTED_LATEST_DEFAULT = 
config.minimax(default_wanted_latest, 0, -1, 10) sickgear.SHOW_TAG_DEFAULT = default_tag @@ -8114,7 +8111,7 @@ class ConfigGeneral(Config): result.update(dict(result='Success: apikey added', added=api_key)) sickgear.USE_API = 1 sickgear.save_config() - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) return json_dumps(result) @@ -8132,7 +8129,7 @@ class ConfigGeneral(Config): logger.log('Revoked [%s] apikey [%s]' % (app_name, api_key), logger.DEBUG) result.update(dict(result='Success: apikey removed', removed=True)) sickgear.save_config() - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) return json_dumps(result) @@ -8287,7 +8284,7 @@ class ConfigGeneral(Config): ui.notifications.error('Error(s) Saving Configuration', '
<br>\n'.join(results)) else: - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) if restart: self.clear_cookie('sickgear-session-%s' % helpers.md5_for_text(sickgear.WEB_PORT)) @@ -8458,7 +8455,7 @@ class ConfigSearch(Config): ui.notifications.error('Error(s) Saving Configuration', '<br>
\n'.join(results)) else: - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) self.redirect('/config/search/') @@ -8585,7 +8582,7 @@ class ConfigMediaProcess(Config): ui.notifications.error('Error(s) Saving Configuration', '<br>
\n'.join(results)) else: - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) self.redirect('/config/media-process/') @@ -8600,7 +8597,7 @@ class ConfigMediaProcess(Config): result = naming.test_name(pattern, multi, abd, sports, anime, anime_type) - result = ek.ek(os.path.join, result['dir'], result['name']) + result = os.path.join(result['dir'], result['name']) return result @@ -8647,8 +8644,8 @@ class ConfigMediaProcess(Config): try: if 'win32' == sys.platform: - rarfile.UNRAR_TOOL = ek.ek(os.path.join, sickgear.PROG_DIR, 'lib', 'rarfile', 'UnRAR.exe') - rar_path = ek.ek(os.path.join, sickgear.PROG_DIR, 'lib', 'rarfile', 'test.rar') + rarfile.UNRAR_TOOL = os.path.join(sickgear.PROG_DIR, 'lib', 'rarfile', 'UnRAR.exe') + rar_path = os.path.join(sickgear.PROG_DIR, 'lib', 'rarfile', 'test.rar') if 'This is only a test.' == decode_str(rarfile.RarFile(rar_path).read(r'test/test.txt')): return 'supported' msg = 'Could not read test file content' @@ -8998,7 +8995,7 @@ class ConfigProviders(Config): logger.log(x, logger.ERROR) ui.notifications.error('Error(s) Saving Configuration', '
<br>\n'.join(results)) else: - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) if reload_page: self.write('reload') @@ -9266,7 +9263,7 @@ class ConfigNotifications(Config): ui.notifications.error('Error(s) Saving Configuration', '<br>
\n'.join(results)) else: - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) self.redirect('/config/notifications/') @@ -9321,7 +9318,7 @@ class ConfigSubtitles(Config): ui.notifications.error('Error(s) Saving Configuration', '
<br>\n'.join(results)) else: - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) self.redirect('/config/subtitles/') @@ -9354,7 +9351,7 @@ class ConfigAnime(Config): ui.notifications.error('Error(s) Saving Configuration', '<br>
\n'.join(results)) else: - ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickgear.CONFIG_FILE)) + ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) self.redirect('/config/anime/') @@ -9410,7 +9407,7 @@ class EventLogs(MainHandler): min_level = int(min_level) - regex = re.compile(r'^\d{4}-\d{2}-\d{2}\s*\d{2}:\d{2}:\d{2}\s*([A-Z]+)\s*([^\s]+)\s+:{2}\s*(.*\r?\n)$') + regex = re.compile(r'^\d{4}-\d{2}-\d{2}\s*\d{2}:\d{2}:\d{2}\s*([A-Z]+)\s*(\S+)\s+:{2}\s*(.*\r?\n)$') final_data = [] normal_data = [] @@ -9578,9 +9575,9 @@ class CachedImages(MainHandler): def should_try_image(filename, source, days=1, minutes=0): result = True try: - dummy_file = '%s.%s.dummy' % (ek.ek(os.path.splitext, filename)[0], source) - if ek.ek(os.path.isfile, dummy_file): - if ek.ek(os.stat, dummy_file).st_mtime \ + dummy_file = '%s.%s.dummy' % (os.path.splitext(filename)[0], source) + if os.path.isfile(dummy_file): + if os.stat(dummy_file).st_mtime \ < (int(timestamp_near((datetime.datetime.now() - datetime.timedelta(days=days, minutes=minutes))))): CachedImages.delete_dummy_image(dummy_file) @@ -9592,7 +9589,7 @@ class CachedImages(MainHandler): @staticmethod def create_dummy_image(filename, source): - dummy_file = '%s.%s.dummy' % (ek.ek(os.path.splitext, filename)[0], source) + dummy_file = '%s.%s.dummy' % (os.path.splitext(filename)[0], source) CachedImages.delete_dummy_image(dummy_file) try: with open(dummy_file, 'w'): @@ -9603,28 +9600,28 @@ class CachedImages(MainHandler): @staticmethod def delete_dummy_image(dummy_file): try: - if ek.ek(os.path.isfile, dummy_file): - ek.ek(os.remove, dummy_file) + if os.path.isfile(dummy_file): + os.remove(dummy_file) except (BaseException, Exception): pass @staticmethod def delete_all_dummy_images(filename): for f in ['tmdb', 'tvdb', 'tvmaze']: - CachedImages.delete_dummy_image('%s.%s.dummy' % (ek.ek(os.path.splitext, filename)[0], f)) + CachedImages.delete_dummy_image('%s.%s.dummy' % (os.path.splitext(filename)[0], f)) def index(self, path='', source=None, filename=None, tmdbid=None, tvdbid=None, trans=True): path = path.strip('/') file_name = '' if None is not source: - file_name = ek.ek(os.path.basename, source) + file_name = os.path.basename(source) elif filename not in [None, 0, '0']: file_name = filename - image_file = ek.ek(os.path.join, sickgear.CACHE_DIR, 'images', path, file_name) - image_file = ek.ek(os.path.abspath, image_file.replace('\\', '/')) - if not ek.ek(os.path.isfile, image_file) and has_image_ext(file_name): - basepath = ek.ek(os.path.dirname, image_file) + image_file = os.path.join(sickgear.CACHE_DIR, 'images', path, file_name) + image_file = os.path.abspath(image_file.replace('\\', '/')) + if not os.path.isfile(image_file) and has_image_ext(file_name): + basepath = os.path.dirname(image_file) helpers.make_path(basepath) poster_url = '' tmdb_image = False @@ -9641,13 +9638,15 @@ class CachedImages(MainHandler): poster_url = show_obj.poster except (BaseException, Exception): poster_url = '' - if poster_url and not sg_helpers.download_file(poster_url, image_file, nocache=True) and poster_url.find('trakt.us'): + if poster_url \ + and not sg_helpers.download_file(poster_url, image_file, nocache=True) \ + and poster_url.find('trakt.us'): sg_helpers.download_file(poster_url.replace('trakt.us', 'trakt.tv'), image_file, nocache=True) - if tmdb_image and not ek.ek(os.path.isfile, image_file): + if tmdb_image and not os.path.isfile(image_file): self.create_dummy_image(image_file, 'tmdb') if None is source and 
tvdbid not in [None, 'None', 0, '0'] \ - and not ek.ek(os.path.isfile, image_file) \ + and not os.path.isfile(image_file) \ and self.should_try_image(image_file, 'tvdb'): try: tvinfo_config = sickgear.TVInfoAPI(TVINFO_TVDB).api_params.copy() @@ -9660,15 +9659,15 @@ class CachedImages(MainHandler): poster_url = '' if poster_url: sg_helpers.download_file(poster_url, image_file, nocache=True) - if not ek.ek(os.path.isfile, image_file): + if not os.path.isfile(image_file): self.create_dummy_image(image_file, 'tvdb') - if ek.ek(os.path.isfile, image_file): + if os.path.isfile(image_file): self.delete_all_dummy_images(image_file) - if not ek.ek(os.path.isfile, image_file): - image_file = ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', 'slick', - 'images', ('image-light.png', 'trans.png')[bool(int(trans))]) + if not os.path.isfile(image_file): + image_file = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', + ('image-light.png', 'trans.png')[bool(int(trans))]) else: helpers.set_file_timestamp(image_file, min_age=3, new_time=None) @@ -9683,8 +9682,8 @@ class CachedImages(MainHandler): :param filename: image file name with path :param days: max age to trigger reload of image """ - if not ek.ek(os.path.isfile, filename) or \ - ek.ek(os.stat, filename).st_mtime < \ + if not os.path.isfile(filename) or \ + os.stat(filename).st_mtime < \ (int(timestamp_near((datetime.datetime.now() - datetime.timedelta(days=days))))): return True return False @@ -9735,9 +9734,9 @@ class CachedImages(MainHandler): sg_helpers.download_file(char_obj.thumb_url, image_thumb, nocache=True) primary, fallback = ((image_normal, image_thumb), (image_thumb, image_normal))[thumb] - if ek.ek(os.path.isfile, primary): + if os.path.isfile(primary): image_file = primary - elif ek.ek(os.path.isfile, fallback): + elif os.path.isfile(fallback): image_file = fallback elif person_id: @@ -9773,9 +9772,9 @@ class CachedImages(MainHandler): sg_helpers.download_file(person_obj.thumb_url, image_thumb, nocache=True) primary, fallback = ((image_normal, image_thumb), (image_thumb, image_normal))[thumb] - if ek.ek(os.path.isfile, primary): + if os.path.isfile(primary): image_file = primary - elif ek.ek(os.path.isfile, fallback): + elif os.path.isfile(fallback): image_file = fallback return self.image_data(image_file, cast_default=True) @@ -9790,7 +9789,7 @@ class CachedImages(MainHandler): :return: binary image data or None """ if cast_default and None is image_file: - image_file = ek.ek(os.path.join, sickgear.PROG_DIR, 'gui', 'slick', 'images', 'poster-person.jpg') + image_file = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', 'poster-person.jpg') mime_type, encoding = MimeTypes().guess_type(image_file) self.set_header('Content-Type', mime_type) diff --git a/tests/network_timezone_tests.py b/tests/network_timezone_tests.py index dc757af8..886f5f1e 100644 --- a/tests/network_timezone_tests.py +++ b/tests/network_timezone_tests.py @@ -10,8 +10,6 @@ import datetime from lib.dateutil import tz import sickgear from sickgear import network_timezones, helpers -# noinspection PyPep8Naming -import encodingKludge as ek class NetworkTimezoneTests(test.SickbeardTestDBCase): @@ -33,12 +31,12 @@ class NetworkTimezoneTests(test.SickbeardTestDBCase): @classmethod def remove_zoneinfo(cls): # delete all existing zoneinfo files - for (path, dirs, files) in ek.ek(os.walk, helpers.real_path(sickgear.ZONEINFO_DIR)): + for (path, dirs, files) in os.walk(helpers.real_path(sickgear.ZONEINFO_DIR)): for filename in files: if filename.endswith('.tar.gz'): - 
file_w_path = ek.ek(os.path.join, path, filename) + file_w_path = os.path.join(path, filename) try: - ek.ek(os.remove, file_w_path) + os.remove(file_w_path) except (BaseException, Exception): pass diff --git a/tests/scene_helpers_tests.py b/tests/scene_helpers_tests.py index 7cbe6257..2827522b 100644 --- a/tests/scene_helpers_tests.py +++ b/tests/scene_helpers_tests.py @@ -26,7 +26,7 @@ class SceneTests(test.SickbeardTestDBCase): s.tvid = TVINFO_TVDB s.name = name - result = show_name_helpers.allPossibleShowNames(s, season=season) + result = show_name_helpers.all_possible_show_names(s, season=season) self.assertTrue(len(set(expected).intersection(set(result))) == len(expected)) def _test_pass_wordlist_checks(self, name, expected): From cf60bb5a91678cdaf2dd00067b4cc0853f31b6c3 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Thu, 9 Feb 2023 19:22:56 +0000 Subject: [PATCH 02/21] Change requirements for pure py3. --- CHANGES.md | 1 + recommended.txt | 32 +++++++++++++------------------- requirements.txt | 3 +-- 3 files changed, 15 insertions(+), 21 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9e068fbd..b2ee095e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,7 @@ ### 3.28.0 (2023-xx-xx xx:xx:00 UTC) * Change remove calls to legacy py2 fix encoding function +* Change requirements for pure py3 ### 3.27.2 (2023-02-10 19:25:00 UTC) diff --git a/recommended.txt b/recommended.txt index fab6cb83..726b5ba7 100644 --- a/recommended.txt +++ b/recommended.txt @@ -1,22 +1,16 @@ --extra-index-url https://gitlab+deploy-token-1599941:UNupqjtDab_zxNzvP2gA@gitlab.com/api/v4/projects/279215/packages/pypi/simple -cffi >= 1.15.1 -cryptography != 38.0.2; '3.7' <= python_version and 'Windows' == platform_system -cryptography != 38.0.2; '3.7' <= python_version and 'Linux' == platform_system and ('x86_64' == platform_machine or 'aarch64' == platform_machine) -cryptography <= 3.3.2; '3.7' <= python_version and 'Linux' == platform_system and 'x86_64' != platform_machine and 'aarch64' != platform_machine -cryptography != 38.0.2; '3.7' <= python_version and 'Darwin' == platform_system and 'x86_64' == platform_machine -cryptography <= 3.3.2; '3.7' <= python_version and 'Darwin' == platform_system and 'x86_64' != platform_machine -cryptography <= 3.3.2; '3.0' > python_version +cffi +cryptography != 38.0.2; 'Windows' == platform_system +cryptography != 38.0.2; 'Linux' == platform_system and ('x86_64' == platform_machine or 'aarch64' == platform_machine) +cryptography <= 3.3.2; 'Linux' == platform_system and 'x86_64' != platform_machine and 'aarch64' != platform_machine +cryptography != 38.0.2; 'Darwin' == platform_system and 'x86_64' == platform_machine +cryptography <= 3.3.2; 'Darwin' == platform_system and 'x86_64' != platform_machine lxml >= 4.9.2; 'Windows' == platform_system lxml; 'Windows' != platform_system -orjson; '3.7' <= python_version and 'Windows' == platform_system -orjson; '3.7' <= python_version and 'Linux' == platform_system and ('x86_64' == platform_machine or 'aarch64' == platform_machine or 'armv7l' == platform_machine) -pip >= 22.2.2; '3.7' <= python_version -pip <= 20.3.4; '3.0' > python_version -Levenshtein >= 0.20.5; '3.11' >= python_version and '3.7' <= python_version -rapidfuzz < 3.0.0; '3.7' <= python_version -python-Levenshtein == 0.12.0; '3.0' > python_version -regex >= 2022.9.13; '3.11' >= python_version and '3.7' <= python_version -regex <= 2020.10.28; '3.0' > python_version -scandir >= 1.10.0; '3.0' > python_version -setuptools >= 65.2.0; '3.7' <= python_version -setuptools <= 
44.1.1; '3.0' > python_version +orjson; 'Windows' == platform_system +orjson; 'Linux' == platform_system and ('x86_64' == platform_machine or 'aarch64' == platform_machine or 'armv7l' == platform_machine) +pip +Levenshtein +rapidfuzz < 3.0.0 +regex +setuptools diff --git a/requirements.txt b/requirements.txt index c09c9121..2abbce53 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1 @@ -Cheetah3 <= 3.2.6, != 3.2.6.post2; '3.0' > python_version -CT3; '3.7' <= python_version +CT3 From 6e859f6f0c7e0c7f41aead562b51a7baf1927b12 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Fri, 17 Feb 2023 01:17:58 +0000 Subject: [PATCH 03/21] =?UTF-8?q?Update=20package=20resource=20API=2063.2.?= =?UTF-8?q?0=20(3ae44cd)=20=E2=86=92=2067.3.2=20(b9bf2ec).?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGES.md | 1 + lib/pkg_resources/__init__.py | 533 +- lib/pkg_resources/_vendor/appdirs.py | 608 -- lib/pkg_resources/_vendor/jaraco/context.py | 75 + .../_vendor/more_itertools/__init__.py | 4 +- .../_vendor/more_itertools/more.py | 224 +- .../_vendor/more_itertools/more.pyi | 36 +- .../_vendor/more_itertools/recipes.py | 175 +- .../_vendor/more_itertools/recipes.pyi | 26 +- .../_vendor/packaging/__about__.py | 26 - .../_vendor/packaging/__init__.py | 30 +- .../_vendor/packaging/_elffile.py | 108 + .../_vendor/packaging/_manylinux.py | 145 +- .../_vendor/packaging/_musllinux.py | 72 +- .../_vendor/packaging/_parser.py | 328 + .../_vendor/packaging/_tokenizer.py | 188 + .../_vendor/packaging/markers.py | 201 +- .../_vendor/packaging/requirements.py | 121 +- .../_vendor/packaging/specifiers.py | 911 ++- lib/pkg_resources/_vendor/packaging/tags.py | 75 +- lib/pkg_resources/_vendor/packaging/utils.py | 11 +- .../_vendor/packaging/version.py | 333 +- .../_vendor/platformdirs/__init__.py | 342 + .../_vendor/platformdirs/__main__.py | 46 + .../_vendor/platformdirs/android.py | 120 + lib/pkg_resources/_vendor/platformdirs/api.py | 156 + .../_vendor/platformdirs/macos.py | 64 + .../{pyparsing => platformdirs}/py.typed | 0 .../_vendor/platformdirs/unix.py | 181 + .../_vendor/platformdirs/version.py | 4 + .../_vendor/platformdirs/windows.py | 184 + .../_vendor/pyparsing/__init__.py | 331 - .../_vendor/pyparsing/actions.py | 207 - lib/pkg_resources/_vendor/pyparsing/common.py | 424 -- lib/pkg_resources/_vendor/pyparsing/core.py | 5814 ----------------- .../_vendor/pyparsing/diagram/__init__.py | 642 -- .../_vendor/pyparsing/exceptions.py | 267 - .../_vendor/pyparsing/helpers.py | 1088 --- .../_vendor/pyparsing/results.py | 760 --- .../_vendor/pyparsing/testing.py | 331 - .../_vendor/pyparsing/unicode.py | 352 - lib/pkg_resources/_vendor/pyparsing/util.py | 235 - .../_vendor/typing_extensions.py | 2209 +++++++ lib/pkg_resources/_vendor/vendored.txt | 9 +- lib/pkg_resources/api_tests.txt | 55 +- lib/pkg_resources/extern/__init__.py | 8 +- 46 files changed, 5638 insertions(+), 12422 deletions(-) delete mode 100644 lib/pkg_resources/_vendor/appdirs.py delete mode 100644 lib/pkg_resources/_vendor/packaging/__about__.py create mode 100644 lib/pkg_resources/_vendor/packaging/_elffile.py create mode 100644 lib/pkg_resources/_vendor/packaging/_parser.py create mode 100644 lib/pkg_resources/_vendor/packaging/_tokenizer.py create mode 100644 lib/pkg_resources/_vendor/platformdirs/__init__.py create mode 100644 lib/pkg_resources/_vendor/platformdirs/__main__.py create mode 100644 lib/pkg_resources/_vendor/platformdirs/android.py create mode 100644 
lib/pkg_resources/_vendor/platformdirs/api.py create mode 100644 lib/pkg_resources/_vendor/platformdirs/macos.py rename lib/pkg_resources/_vendor/{pyparsing => platformdirs}/py.typed (100%) create mode 100644 lib/pkg_resources/_vendor/platformdirs/unix.py create mode 100644 lib/pkg_resources/_vendor/platformdirs/version.py create mode 100644 lib/pkg_resources/_vendor/platformdirs/windows.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/__init__.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/actions.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/common.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/core.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/diagram/__init__.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/exceptions.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/helpers.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/results.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/testing.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/unicode.py delete mode 100644 lib/pkg_resources/_vendor/pyparsing/util.py create mode 100644 lib/pkg_resources/_vendor/typing_extensions.py diff --git a/CHANGES.md b/CHANGES.md index 2389ace7..0ccc1e05 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,6 @@ ### 3.28.0 (2023-xx-xx xx:xx:00 UTC) +* Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) * Change remove calls to legacy py2 fix encoding function * Change requirements for pure py3 diff --git a/lib/pkg_resources/__init__.py b/lib/pkg_resources/__init__.py index d59226af..1eb3f9e2 100644 --- a/lib/pkg_resources/__init__.py +++ b/lib/pkg_resources/__init__.py @@ -34,7 +34,6 @@ import email.parser import errno import tempfile import textwrap -import itertools import inspect import ntpath import posixpath @@ -54,8 +53,10 @@ except NameError: # capture these to bypass sandboxing from os import utime + try: from os import mkdir, rename, unlink + WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE @@ -66,6 +67,7 @@ from os.path import isdir, split try: import importlib.machinery as importlib_machinery + # access attribute to force import under delayed import mechanisms. 
importlib_machinery.__name__ except ImportError: @@ -77,8 +79,9 @@ from pkg_resources.extern.jaraco.text import ( join_continuation, ) -from pkg_resources.extern import appdirs +from pkg_resources.extern import platformdirs from pkg_resources.extern import packaging + __import__('pkg_resources.extern.packaging.version') __import__('pkg_resources.extern.packaging.specifiers') __import__('pkg_resources.extern.packaging.requirements') @@ -116,16 +119,7 @@ class PEP440Warning(RuntimeWarning): """ -def parse_version(v): - try: - return packaging.version.Version(v) - except packaging.version.InvalidVersion: - warnings.warn( - f"{v} is an invalid version and will not be supported in " - "a future release", - PkgResourcesDeprecationWarning, - ) - return packaging.version.LegacyVersion(v) +parse_version = packaging.version.Version _state_vars = {} @@ -197,51 +191,87 @@ def get_supported_platform(): __all__ = [ # Basic resource access and distribution/entry point discovery - 'require', 'run_script', 'get_provider', 'get_distribution', - 'load_entry_point', 'get_entry_map', 'get_entry_info', + 'require', + 'run_script', + 'get_provider', + 'get_distribution', + 'load_entry_point', + 'get_entry_map', + 'get_entry_info', 'iter_entry_points', - 'resource_string', 'resource_stream', 'resource_filename', - 'resource_listdir', 'resource_exists', 'resource_isdir', - + 'resource_string', + 'resource_stream', + 'resource_filename', + 'resource_listdir', + 'resource_exists', + 'resource_isdir', # Environmental control - 'declare_namespace', 'working_set', 'add_activation_listener', - 'find_distributions', 'set_extraction_path', 'cleanup_resources', + 'declare_namespace', + 'working_set', + 'add_activation_listener', + 'find_distributions', + 'set_extraction_path', + 'cleanup_resources', 'get_default_cache', - # Primary implementation classes - 'Environment', 'WorkingSet', 'ResourceManager', - 'Distribution', 'Requirement', 'EntryPoint', - + 'Environment', + 'WorkingSet', + 'ResourceManager', + 'Distribution', + 'Requirement', + 'EntryPoint', # Exceptions - 'ResolutionError', 'VersionConflict', 'DistributionNotFound', - 'UnknownExtra', 'ExtractionError', - + 'ResolutionError', + 'VersionConflict', + 'DistributionNotFound', + 'UnknownExtra', + 'ExtractionError', # Warnings 'PEP440Warning', - # Parsing functions and string utilities - 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', - 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', - 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', - + 'parse_requirements', + 'parse_version', + 'safe_name', + 'safe_version', + 'get_platform', + 'compatible_platforms', + 'yield_lines', + 'split_sections', + 'safe_extra', + 'to_filename', + 'invalid_marker', + 'evaluate_marker', # filesystem utilities - 'ensure_directory', 'normalize_path', - + 'ensure_directory', + 'normalize_path', # Distribution "precedence" constants - 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', - + 'EGG_DIST', + 'BINARY_DIST', + 'SOURCE_DIST', + 'CHECKOUT_DIST', + 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs - 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', - 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', - 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', - 'register_finder', 'register_namespace_handler', 'register_loader_type', - 'fixup_namespace_packages', 'get_importer', - + 'IMetadataProvider', + 'IResourceProvider', + 
'FileMetadata', + 'PathMetadata', + 'EggMetadata', + 'EmptyProvider', + 'empty_provider', + 'NullProvider', + 'EggProvider', + 'DefaultProvider', + 'ZipProvider', + 'register_finder', + 'register_namespace_handler', + 'register_loader_type', + 'fixup_namespace_packages', + 'get_importer', # Warnings 'PkgResourcesDeprecationWarning', - # Deprecated/backward compatibility only - 'run_main', 'AvailableDistributions', + 'run_main', + 'AvailableDistributions', ] @@ -300,8 +330,10 @@ class ContextualVersionConflict(VersionConflict): class DistributionNotFound(ResolutionError): """A requested distribution was not found""" - _template = ("The '{self.req}' distribution was not found " - "and is required by {self.requirers_str}") + _template = ( + "The '{self.req}' distribution was not found " + "and is required by {self.requirers_str}" + ) @property def req(self): @@ -395,7 +427,8 @@ def get_build_platform(): version = _macos_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % ( - int(version[0]), int(version[1]), + int(version[0]), + int(version[1]), _macos_arch(machine), ) except ValueError: @@ -436,15 +469,18 @@ def compatible_platforms(provided, required): if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) - if dversion == 7 and macosversion >= "10.3" or \ - dversion == 8 and macosversion >= "10.4": + if ( + dversion == 7 + and macosversion >= "10.3" + or dversion == 8 + and macosversion >= "10.4" + ): return True # egg isn't macOS or legacy darwin return False # are they the same major version and machine type? - if provMac.group(1) != reqMac.group(1) or \ - provMac.group(3) != reqMac.group(3): + if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? @@ -506,8 +542,8 @@ class IMetadataProvider: def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines - Leading and trailing whitespace is stripped from each line, and lines - with ``#`` as the first non-blank character are omitted.""" + Leading and trailing whitespace is stripped from each line, and lines + with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? (like ``os.path.isdir()``)""" @@ -720,9 +756,14 @@ class WorkingSet: keys2.append(dist.key) self._added_new(dist) - # FIXME: 'WorkingSet.resolve' is too complex (11) - def resolve(self, requirements, env=None, installer=None, # noqa: C901 - replace_conflicting=False, extras=None): + def resolve( + self, + requirements, + env=None, + installer=None, + replace_conflicting=False, + extras=None, + ): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. 
`env`, @@ -771,33 +812,9 @@ class WorkingSet: if not req_extras.markers_pass(req, extras): continue - dist = best.get(req.key) - if dist is None: - # Find the best distribution and add it to the map - dist = self.by_key.get(req.key) - if dist is None or (dist not in req and replace_conflicting): - ws = self - if env is None: - if dist is None: - env = Environment(self.entries) - else: - # Use an empty environment and workingset to avoid - # any further conflicts with the conflicting - # distribution - env = Environment([]) - ws = WorkingSet([]) - dist = best[req.key] = env.best_match( - req, ws, installer, - replace_conflicting=replace_conflicting - ) - if dist is None: - requirers = required_by.get(req, None) - raise DistributionNotFound(req, requirers) - to_activate.append(dist) - if dist not in req: - # Oops, the "best" so far conflicts with a dependency - dependent_req = required_by[req] - raise VersionConflict(dist, req).with_context(dependent_req) + dist = self._resolve_dist( + req, best, replace_conflicting, env, installer, required_by, to_activate + ) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] @@ -813,8 +830,38 @@ class WorkingSet: # return list of distros to activate return to_activate - def find_plugins( - self, plugin_env, full_env=None, installer=None, fallback=True): + def _resolve_dist( + self, req, best, replace_conflicting, env, installer, required_by, to_activate + ): + dist = best.get(req.key) + if dist is None: + # Find the best distribution and add it to the map + dist = self.by_key.get(req.key) + if dist is None or (dist not in req and replace_conflicting): + ws = self + if env is None: + if dist is None: + env = Environment(self.entries) + else: + # Use an empty environment and workingset to avoid + # any further conflicts with the conflicting + # distribution + env = Environment([]) + ws = WorkingSet([]) + dist = best[req.key] = env.best_match( + req, ws, installer, replace_conflicting=replace_conflicting + ) + if dist is None: + requirers = required_by.get(req, None) + raise DistributionNotFound(req, requirers) + to_activate.append(dist) + if dist not in req: + # Oops, the "best" so far conflicts with a dependency + dependent_req = required_by[req] + raise VersionConflict(dist, req).with_context(dependent_req) + return dist + + def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: @@ -933,8 +980,11 @@ class WorkingSet: def __getstate__(self): return ( - self.entries[:], self.entry_keys.copy(), self.by_key.copy(), - self.normalized_to_canonical_keys.copy(), self.callbacks[:] + self.entries[:], + self.entry_keys.copy(), + self.by_key.copy(), + self.normalized_to_canonical_keys.copy(), + self.callbacks[:], ) def __setstate__(self, e_k_b_n_c): @@ -970,8 +1020,8 @@ class Environment: """Searchable snapshot of distributions on a search path""" def __init__( - self, search_path=None, platform=get_supported_platform(), - python=PY_MAJOR): + self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR + ): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. 
@@ -1038,16 +1088,14 @@ class Environment: return self._distmap.get(distribution_key, []) def add(self, dist): - """Add `dist` if we ``can_add()`` it and it has not already been added - """ + """Add `dist` if we ``can_add()`` it and it has not already been added""" if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - def best_match( - self, req, working_set, installer=None, replace_conflicting=False): + def best_match(self, req, working_set, installer=None, replace_conflicting=False): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a @@ -1134,6 +1182,7 @@ class ExtractionError(RuntimeError): class ResourceManager: """Manage resource extraction and packages""" + extraction_path = None def __init__(self): @@ -1145,9 +1194,7 @@ class ResourceManager: def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" - return get_provider(package_or_requirement).resource_isdir( - resource_name - ) + return get_provider(package_or_requirement).resource_isdir(resource_name) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" @@ -1169,9 +1216,7 @@ class ResourceManager: def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" - return get_provider(package_or_requirement).resource_listdir( - resource_name - ) + return get_provider(package_or_requirement).resource_listdir(resource_name) def extraction_error(self): """Give an error message for problems extracting file(s)""" @@ -1179,7 +1224,8 @@ class ResourceManager: old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() - tmpl = textwrap.dedent(""" + tmpl = textwrap.dedent( + """ Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) @@ -1194,7 +1240,8 @@ class ResourceManager: Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. - """).lstrip() + """ + ).lstrip() err = ExtractionError(tmpl.format(**locals())) err.manager = self err.cache_path = cache_path @@ -1293,9 +1340,7 @@ class ResourceManager: ``cleanup_resources()``.) """ if self.cached_files: - raise ValueError( - "Can't change extraction path, files already extracted" - ) + raise ValueError("Can't change extraction path, files already extracted") self.extraction_path = path @@ -1319,9 +1364,8 @@ def get_default_cache(): or a platform-relevant user cache dir for an app named "Python-Eggs". 
""" - return ( - os.environ.get('PYTHON_EGG_CACHE') - or appdirs.user_cache_dir(appname='Python-Eggs') + return os.environ.get('PYTHON_EGG_CACHE') or platformdirs.user_cache_dir( + appname='Python-Eggs' ) @@ -1458,8 +1502,9 @@ class NullProvider: script = 'scripts/' + script_name if not self.has_metadata(script): raise ResolutionError( - "Script {script!r} not found in metadata at {self.egg_info!r}" - .format(**locals()), + "Script {script!r} not found in metadata at {self.egg_info!r}".format( + **locals() + ), ) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') @@ -1472,8 +1517,12 @@ class NullProvider: exec(code, namespace, namespace) else: from linecache import cache + cache[script_filename] = ( - len(script_text), 0, script_text.split('\n'), script_filename + len(script_text), + 0, + script_text.split('\n'), + script_filename, ) script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) @@ -1553,9 +1602,9 @@ is not allowed. AttributeError: ... """ invalid = ( - os.path.pardir in path.split(posixpath.sep) or - posixpath.isabs(path) or - ntpath.isabs(path) + os.path.pardir in path.split(posixpath.sep) + or posixpath.isabs(path) + or ntpath.isabs(path) ) if not invalid: return @@ -1637,7 +1686,10 @@ class DefaultProvider(EggProvider): @classmethod def _register(cls): - loader_names = 'SourceFileLoader', 'SourcelessFileLoader', + loader_names = ( + 'SourceFileLoader', + 'SourcelessFileLoader', + ) for name in loader_names: loader_cls = getattr(importlib_machinery, name, type(None)) register_loader_type(loader_cls, cls) @@ -1697,6 +1749,7 @@ class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ + manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): @@ -1730,20 +1783,16 @@ class ZipProvider(EggProvider): if fspath == self.loader.archive: return '' if fspath.startswith(self.zip_pre): - return fspath[len(self.zip_pre):] - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.zip_pre) - ) + return fspath[len(self.zip_pre) :] + raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre)) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. 
# pseudo-fs path fspath = self.zip_pre + zip_path if fspath.startswith(self.egg_root + os.sep): - return fspath[len(self.egg_root) + 1:].split(os.sep) - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.egg_root) - ) + return fspath[len(self.egg_root) + 1 :].split(os.sep) + raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root)) @property def zipinfo(self): @@ -1776,22 +1825,19 @@ class ZipProvider(EggProvider): if zip_path in self._index(): for name in self._index()[zip_path]: - last = self._extract_resource( - manager, os.path.join(zip_path, name) - ) + last = self._extract_resource(manager, os.path.join(zip_path, name)) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: - raise IOError('"os.rename" and "os.unlink" are not supported ' - 'on this platform') + raise IOError( + '"os.rename" and "os.unlink" are not supported ' 'on this platform' + ) try: - real_path = manager.get_cache_path( - self.egg_name, self._parts(zip_path) - ) + real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path)) if self._is_current(real_path, zip_path): return real_path @@ -2027,70 +2073,21 @@ def find_nothing(importer, path_item, only=False): register_finder(object, find_nothing) -def _by_version_descending(names): - """ - Given a list of filenames, return them in descending order - by version number. - - >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' - >>> _by_version_descending(names) - ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'bar', 'foo'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] - """ - def try_parse(name): - """ - Attempt to parse as a version or return a null version. 
- """ - try: - return packaging.version.Version(name) - except Exception: - return packaging.version.Version('0') - - def _by_version(name): - """ - Parse each component of the filename - """ - name, ext = os.path.splitext(name) - parts = itertools.chain(name.split('-'), [ext]) - return [try_parse(part) for part in parts] - - return sorted(names, key=_by_version, reverse=True) - - def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if _is_unpacked_egg(path_item): yield Distribution.from_filename( - path_item, metadata=PathMetadata( - path_item, os.path.join(path_item, 'EGG-INFO') - ) + path_item, + metadata=PathMetadata(path_item, os.path.join(path_item, 'EGG-INFO')), ) return - entries = ( - os.path.join(path_item, child) - for child in safe_listdir(path_item) - ) - - # for performance, before sorting by version, - # screen entries for only those that will yield - # distributions - filtered = ( - entry - for entry in entries - if dist_factory(path_item, entry, only) - ) + entries = (os.path.join(path_item, child) for child in safe_listdir(path_item)) # scan for .egg and .egg-info in directory - path_item_entries = _by_version_descending(filtered) - for entry in path_item_entries: + for entry in sorted(entries): fullpath = os.path.join(path_item, entry) factory = dist_factory(path_item, entry, only) for dist in factory(fullpath): @@ -2101,19 +2098,18 @@ def dist_factory(path_item, entry, only): """Return a dist_factory for the given entry.""" lower = entry.lower() is_egg_info = lower.endswith('.egg-info') - is_dist_info = ( - lower.endswith('.dist-info') and - os.path.isdir(os.path.join(path_item, entry)) + is_dist_info = lower.endswith('.dist-info') and os.path.isdir( + os.path.join(path_item, entry) ) is_meta = is_egg_info or is_dist_info return ( distributions_from_metadata - if is_meta else - find_distributions - if not only and _is_egg_path(entry) else - resolve_egg_link - if not only and lower.endswith('.egg-link') else - NoDists() + if is_meta + else find_distributions + if not only and _is_egg_path(entry) + else resolve_egg_link + if not only and lower.endswith('.egg-link') + else NoDists() ) @@ -2125,6 +2121,7 @@ class NoDists: >>> list(NoDists()('anything')) [] """ + def __bool__(self): return False @@ -2159,7 +2156,10 @@ def distributions_from_metadata(path): metadata = FileMetadata(path) entry = os.path.basename(path) yield Distribution.from_location( - root, entry, metadata, precedence=DEVELOP_DIST, + root, + entry, + metadata, + precedence=DEVELOP_DIST, ) @@ -2181,17 +2181,16 @@ def resolve_egg_link(path): """ referenced_paths = non_empty_lines(path) resolved_paths = ( - os.path.join(os.path.dirname(path), ref) - for ref in referenced_paths + os.path.join(os.path.dirname(path), ref) for ref in referenced_paths ) dist_groups = map(find_distributions, resolved_paths) return next(dist_groups, ()) -register_finder(pkgutil.ImpImporter, find_on_path) +if hasattr(pkgutil, 'ImpImporter'): + register_finder(pkgutil.ImpImporter, find_on_path) -if hasattr(importlib_machinery, 'FileFinder'): - register_finder(importlib_machinery.FileFinder, find_on_path) +register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) @@ -2289,6 +2288,15 @@ def _rebuild_mod_path(orig_path, package_name, module): def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" + 
msg = ( + f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n" + "Implementing implicit namespace packages (as specified in PEP 420) " + "is preferred to `pkg_resources.declare_namespace`. " + "See https://setuptools.pypa.io/en/latest/references/" + "keywords.html#keyword-namespace-packages" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + _imp.acquire_lock() try: if packageName in _namespace_packages: @@ -2345,11 +2353,11 @@ def file_ns_handler(importer, path_item, packageName, module): return subpath -register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) -register_namespace_handler(zipimport.zipimporter, file_ns_handler) +if hasattr(pkgutil, 'ImpImporter'): + register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) -if hasattr(importlib_machinery, 'FileFinder'): - register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) +register_namespace_handler(zipimport.zipimporter, file_ns_handler) +register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): @@ -2361,8 +2369,7 @@ register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" - return os.path.normcase(os.path.realpath(os.path.normpath( - _cygwin_patch(filename)))) + return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename)))) def _cygwin_patch(filename): # pragma: nocover @@ -2393,9 +2400,9 @@ def _is_egg_path(path): def _is_zip_egg(path): return ( - path.lower().endswith('.egg') and - os.path.isfile(path) and - zipfile.is_zipfile(path) + path.lower().endswith('.egg') + and os.path.isfile(path) + and zipfile.is_zipfile(path) ) @@ -2403,9 +2410,8 @@ def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. """ - return ( - path.lower().endswith('.egg') and - os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) + return path.lower().endswith('.egg') and os.path.isfile( + os.path.join(path, 'EGG-INFO', 'PKG-INFO') ) @@ -2569,8 +2575,10 @@ def _version_from_file(lines): Given an iterable of lines from a Metadata file, return the value of the Version field, if present, or None otherwise. 
""" + def is_version_line(line): return line.lower().startswith('version:') + version_lines = filter(is_version_line, lines) line = next(iter(version_lines), '') _, _, value = line.partition(':') @@ -2579,12 +2587,19 @@ def _version_from_file(lines): class Distribution: """Wrap an actual or potential sys.path entry w/metadata""" + PKG_INFO = 'PKG-INFO' def __init__( - self, location=None, metadata=None, project_name=None, - version=None, py_version=PY_MAJOR, platform=None, - precedence=EGG_DIST): + self, + location=None, + metadata=None, + project_name=None, + version=None, + py_version=PY_MAJOR, + platform=None, + precedence=EGG_DIST, + ): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) @@ -2607,8 +2622,13 @@ class Distribution: 'name', 'ver', 'pyver', 'plat' ) return cls( - location, metadata, project_name=project_name, version=version, - py_version=py_version, platform=platform, **kw + location, + metadata, + project_name=project_name, + version=version, + py_version=py_version, + platform=platform, + **kw, )._reload_version() def _reload_version(self): @@ -2664,36 +2684,17 @@ class Distribution: @property def parsed_version(self): if not hasattr(self, "_parsed_version"): - self._parsed_version = parse_version(self.version) + try: + self._parsed_version = parse_version(self.version) + except packaging.version.InvalidVersion as ex: + info = f"(package: {self.project_name})" + if hasattr(ex, "add_note"): + ex.add_note(info) # PEP 678 + raise + raise packaging.version.InvalidVersion(f"{str(ex)} {info}") from None return self._parsed_version - def _warn_legacy_version(self): - LV = packaging.version.LegacyVersion - is_legacy = isinstance(self._parsed_version, LV) - if not is_legacy: - return - - # While an empty version is technically a legacy version and - # is not a valid PEP 440 version, it's also unlikely to - # actually come from someone and instead it is more likely that - # it comes from setuptools attempting to parse a filename and - # including it in the list. So for that we'll gate this warning - # on if the version is anything at all or not. - if not self.version: - return - - tmpl = textwrap.dedent(""" - '{project_name} ({version})' is being parsed as a legacy, - non PEP 440, - version. You may find odd behavior and sort order. - In particular it will be sorted as less than 0.0. It - is recommended to migrate to PEP 440 compatible - versions. 
- """).strip().replace('\n', ' ') - - warnings.warn(tmpl.format(**vars(self)), PEP440Warning) - @property def version(self): try: @@ -2702,9 +2703,9 @@ class Distribution: version = self._get_version() if version is None: path = self._get_metadata_path_for_display(self.PKG_INFO) - msg = ( - "Missing 'Version:' header and/or {} file at path: {}" - ).format(self.PKG_INFO, path) + msg = ("Missing 'Version:' header and/or {} file at path: {}").format( + self.PKG_INFO, path + ) raise ValueError(msg, self) from e return version @@ -2733,8 +2734,7 @@ class Distribution: reqs = dm.pop(extra) new_extra, _, marker = extra.partition(':') fails_marker = marker and ( - invalid_marker(marker) - or not evaluate_marker(marker) + invalid_marker(marker) or not evaluate_marker(marker) ) if fails_marker: reqs = [] @@ -2806,8 +2806,9 @@ class Distribution: def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( - to_filename(self.project_name), to_filename(self.version), - self.py_version or PY_MAJOR + to_filename(self.project_name), + to_filename(self.version), + self.py_version or PY_MAJOR, ) if self.platform: @@ -2837,17 +2838,13 @@ class Distribution: def __dir__(self): return list( set(super(Distribution, self).__dir__()) - | set( - attr for attr in self._provider.__dir__() - if not attr.startswith('_') - ) + | set(attr for attr in self._provider.__dir__() if not attr.startswith('_')) ) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( - _normalize_cached(filename), os.path.basename(filename), metadata, - **kw + _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): @@ -2959,14 +2956,18 @@ class Distribution: nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): - if (modname not in sys.modules or modname in nsp - or modname in _namespace_packages): + if ( + modname not in sys.modules + or modname in nsp + or modname in _namespace_packages + ): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) - if fn and (normalize_path(fn).startswith(loc) or - fn.startswith(self.location)): + if fn and ( + normalize_path(fn).startswith(loc) or fn.startswith(self.location) + ): continue issue_warning( "Module %s was already imported from %s, but %s is being added" @@ -3018,6 +3019,7 @@ class DistInfoDistribution(Distribution): Wrap an actual or potential sys.path entry w/metadata, .dist-info style. 
""" + PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @@ -3103,8 +3105,7 @@ class Requirement(packaging.requirements.Requirement): self.unsafe_name = self.name project_name = safe_name(self.name) self.project_name, self.key = project_name, project_name.lower() - self.specs = [ - (spec.operator, spec.version) for spec in self.specifier] + self.specs = [(spec.operator, spec.version) for spec in self.specifier] self.extras = tuple(map(safe_extra, self.extras)) self.hashCmp = ( self.key, @@ -3116,10 +3117,7 @@ class Requirement(packaging.requirements.Requirement): self.__hash = hash(self.hashCmp) def __eq__(self, other): - return ( - isinstance(other, Requirement) and - self.hashCmp == other.hashCmp - ) + return isinstance(other, Requirement) and self.hashCmp == other.hashCmp def __ne__(self, other): return not self == other @@ -3144,7 +3142,7 @@ class Requirement(packaging.requirements.Requirement): @staticmethod def parse(s): - req, = parse_requirements(s) + (req,) = parse_requirements(s) return req @@ -3282,10 +3280,7 @@ def _initialize_master_working_set(): # ensure that all distributions added to the working set in the future # (e.g. by calling ``require()``) will get activated as well, # with higher priority (replace=True). - tuple( - dist.activate(replace=False) - for dist in working_set - ) + tuple(dist.activate(replace=False) for dist in working_set) add_activation_listener( lambda dist: dist.activate(replace=True), existing=False, diff --git a/lib/pkg_resources/_vendor/appdirs.py b/lib/pkg_resources/_vendor/appdirs.py deleted file mode 100644 index ae67001a..00000000 --- a/lib/pkg_resources/_vendor/appdirs.py +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version_info__ = (1, 4, 3) -__version__ = '.'.join(map(str, __version_info__)) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. 
You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/ - Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\\Application Data\\ - Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ - Win 7 (not roaming): C:\Users\\AppData\Local\\ - Win 7 (roaming): C:\Users\\AppData\Roaming\\ - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/". - """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/', - if XDG_DATA_DIRS is not set - - Typical site data directories are: - Mac OS X: /Library/Application Support/ - Unix: /usr/local/share/ or /usr/share/ - Win XP: C:\Documents and Settings\All Users\Application Data\\ - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. 
By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/ - Unix: ~/.cache/ (XDG default) - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache - Vista: C:\Users\\AppData\Local\\\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. 
- - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/ - Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs - Vista: C:\Users\\AppData\Local\\\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. 
- """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # . 
- has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - import win32com.shell - _get_win_folder = _get_win_folder_with_pywin32 - except ImportError: - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/lib/pkg_resources/_vendor/jaraco/context.py b/lib/pkg_resources/_vendor/jaraco/context.py index 87a4e3dc..b0d1ef37 100644 --- a/lib/pkg_resources/_vendor/jaraco/context.py +++ b/lib/pkg_resources/_vendor/jaraco/context.py @@ -5,10 +5,18 @@ import functools import tempfile import shutil import operator +import warnings @contextlib.contextmanager def pushd(dir): + """ + >>> tmp_path = getfixture('tmp_path') + >>> with pushd(tmp_path): + ... assert os.getcwd() == os.fspath(tmp_path) + >>> assert os.getcwd() != os.fspath(tmp_path) + """ + orig = os.getcwd() os.chdir(dir) try: @@ -29,6 +37,8 @@ def tarball_context(url, target_dir=None, runner=None, pushd=pushd): target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '') if runner is None: runner = functools.partial(subprocess.check_call, shell=True) + else: + warnings.warn("runner parameter is deprecated", DeprecationWarning) # In the tar command, use --strip-components=1 to strip the first path and # then # use -C to cause the files to be extracted to {target_dir}. 
This ensures @@ -48,6 +58,15 @@ def tarball_context(url, target_dir=None, runner=None, pushd=pushd): def infer_compression(url): """ Given a URL or filename, infer the compression code for tar. + + >>> infer_compression('http://foo/bar.tar.gz') + 'z' + >>> infer_compression('http://foo/bar.tgz') + 'z' + >>> infer_compression('file.bz') + 'j' + >>> infer_compression('file.xz') + 'J' """ # cheat and just assume it's the last two characters compression_indicator = url[-2:] @@ -61,6 +80,12 @@ def temp_dir(remover=shutil.rmtree): """ Create a temporary directory context. Pass a custom remover to override the removal behavior. + + >>> import pathlib + >>> with temp_dir() as the_dir: + ... assert os.path.isdir(the_dir) + ... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents') + >>> assert not os.path.exists(the_dir) """ temp_dir = tempfile.mkdtemp() try: @@ -90,6 +115,12 @@ def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir): @contextlib.contextmanager def null(): + """ + A null context suitable to stand in for a meaningful context. + + >>> with null() as value: + ... assert value is None + """ yield @@ -112,6 +143,10 @@ class ExceptionTrap: ... raise ValueError("1 + 1 is not 3") >>> bool(trap) True + >>> trap.value + ValueError('1 + 1 is not 3') + >>> trap.tb + >>> with ExceptionTrap(ValueError) as trap: ... raise Exception() @@ -211,3 +246,43 @@ class suppress(contextlib.suppress, contextlib.ContextDecorator): ... {}[''] >>> key_error() """ + + +class on_interrupt(contextlib.ContextDecorator): + """ + Replace a KeyboardInterrupt with SystemExit(1) + + >>> def do_interrupt(): + ... raise KeyboardInterrupt() + >>> on_interrupt('error')(do_interrupt)() + Traceback (most recent call last): + ... + SystemExit: 1 + >>> on_interrupt('error', code=255)(do_interrupt)() + Traceback (most recent call last): + ... + SystemExit: 255 + >>> on_interrupt('suppress')(do_interrupt)() + >>> with __import__('pytest').raises(KeyboardInterrupt): + ... 
on_interrupt('ignore')(do_interrupt)() + """ + + def __init__( + self, + action='error', + # py3.7 compat + # /, + code=1, + ): + self.action = action + self.code = code + + def __enter__(self): + return self + + def __exit__(self, exctype, excinst, exctb): + if exctype is not KeyboardInterrupt or self.action == 'ignore': + return + elif self.action == 'error': + raise SystemExit(self.code) from excinst + return self.action == 'suppress' diff --git a/lib/pkg_resources/_vendor/more_itertools/__init__.py b/lib/pkg_resources/_vendor/more_itertools/__init__.py index ea38bef1..557bfc20 100644 --- a/lib/pkg_resources/_vendor/more_itertools/__init__.py +++ b/lib/pkg_resources/_vendor/more_itertools/__init__.py @@ -1,4 +1,6 @@ +"""More routines for operating on iterables, beyond itertools""" + from .more import * # noqa from .recipes import * # noqa -__version__ = '8.12.0' +__version__ = '9.0.0' diff --git a/lib/pkg_resources/_vendor/more_itertools/more.py b/lib/pkg_resources/_vendor/more_itertools/more.py index 6b6a5cab..0b29fca0 100644 --- a/lib/pkg_resources/_vendor/more_itertools/more.py +++ b/lib/pkg_resources/_vendor/more_itertools/more.py @@ -3,7 +3,7 @@ import warnings from collections import Counter, defaultdict, deque, abc from collections.abc import Sequence from functools import partial, reduce, wraps -from heapq import merge, heapify, heapreplace, heappop +from heapq import heapify, heapreplace, heappop from itertools import ( chain, compress, @@ -26,12 +26,16 @@ from sys import hexversion, maxsize from time import monotonic from .recipes import ( + _marker, + _zip_equal, + UnequalIterablesError, consume, flatten, pairwise, powerset, take, unique_everseen, + all_equal, ) __all__ = [ @@ -48,9 +52,9 @@ __all__ = [ 'chunked_even', 'circular_shifts', 'collapse', - 'collate', 'combination_index', 'consecutive_groups', + 'constrained_batches', 'consumer', 'count_cycle', 'countable', @@ -66,6 +70,7 @@ __all__ = [ 'first', 'groupby_transform', 'ichunked', + 'iequals', 'ilen', 'interleave', 'interleave_evenly', @@ -76,6 +81,7 @@ __all__ = [ 'iterate', 'last', 'locate', + 'longest_common_prefix', 'lstrip', 'make_decorator', 'map_except', @@ -132,9 +138,6 @@ __all__ = [ ] -_marker = object() - - def chunked(iterable, n, strict=False): """Break *iterable* into lists of length *n*: @@ -409,44 +412,6 @@ class peekable: return self._cache[index] -def collate(*iterables, **kwargs): - """Return a sorted merge of the items from each of several already-sorted - *iterables*. - - >>> list(collate('ACDZ', 'AZ', 'JKL')) - ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] - - Works lazily, keeping only the next value from each iterable in memory. Use - :func:`collate` to, for example, perform a n-way mergesort of items that - don't fit in memory. - - If a *key* function is specified, the iterables will be sorted according - to its result: - - >>> key = lambda s: int(s) # Sort by numeric value, not by string - >>> list(collate(['1', '10'], ['2', '11'], key=key)) - ['1', '2', '10', '11'] - - - If the *iterables* are sorted in descending order, set *reverse* to - ``True``: - - >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True)) - [5, 4, 3, 2, 1, 0] - - If the elements of the passed-in iterables are out of order, you might get - unexpected results. - - On Python 3.5+, this function is an alias for :func:`heapq.merge`. 
- - """ - warnings.warn( - "collate is no longer part of more_itertools, use heapq.merge", - DeprecationWarning, - ) - return merge(*iterables, **kwargs) - - def consumer(func): """Decorator that automatically advances a PEP-342-style "reverse iterator" to its first yield point so you don't have to call ``next()`` on it @@ -872,7 +837,9 @@ def windowed(seq, n, fillvalue=None, step=1): yield tuple(window) size = len(window) - if size < n: + if size == 0: + return + elif size < n: yield tuple(chain(window, repeat(fillvalue, n - size))) elif 0 < i < min(step, n): window += (fillvalue,) * i @@ -1645,45 +1612,6 @@ def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): ) -class UnequalIterablesError(ValueError): - def __init__(self, details=None): - msg = 'Iterables have different lengths' - if details is not None: - msg += (': index 0 has length {}; index {} has length {}').format( - *details - ) - - super().__init__(msg) - - -def _zip_equal_generator(iterables): - for combo in zip_longest(*iterables, fillvalue=_marker): - for val in combo: - if val is _marker: - raise UnequalIterablesError() - yield combo - - -def _zip_equal(*iterables): - # Check whether the iterables are all the same size. - try: - first_size = len(iterables[0]) - for i, it in enumerate(iterables[1:], 1): - size = len(it) - if size != first_size: - break - else: - # If we didn't break out, we can use the built-in zip. - return zip(*iterables) - - # If we did break out, there was a mismatch. - raise UnequalIterablesError(details=(first_size, i, size)) - # If any one of the iterables didn't have a length, start reading - # them until one runs out. - except TypeError: - return _zip_equal_generator(iterables) - - def zip_equal(*iterables): """``zip`` the input *iterables* together, but raise ``UnequalIterablesError`` if they aren't all the same length. @@ -1825,7 +1753,7 @@ def unzip(iterable): of the zipped *iterable*. The ``i``-th iterable contains the ``i``-th element from each element - of the zipped iterable. The first element is used to to determine the + of the zipped iterable. The first element is used to determine the length of the remaining elements. >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] @@ -2375,6 +2303,16 @@ def locate(iterable, pred=bool, window_size=None): return compress(count(), starmap(pred, it)) +def longest_common_prefix(iterables): + """Yield elements of the longest common prefix amongst given *iterables*. + + >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf'])) + 'ab' + + """ + return (c[0] for c in takewhile(all_equal, zip(*iterables))) + + def lstrip(iterable, pred): """Yield the items from *iterable*, but strip any from the beginning for which *pred* returns ``True``. @@ -2683,7 +2621,7 @@ def difference(iterable, func=sub, *, initial=None): if initial is not None: first = [] - return chain(first, starmap(func, zip(b, a))) + return chain(first, map(func, b, a)) class SequenceView(Sequence): @@ -3326,6 +3264,27 @@ def only(iterable, default=None, too_long=None): return first_value +class _IChunk: + def __init__(self, iterable, n): + self._it = islice(iterable, n) + self._cache = deque() + + def fill_cache(self): + self._cache.extend(self._it) + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self._it) + except StopIteration: + if self._cache: + return self._cache.popleft() + else: + raise + + def ichunked(iterable, n): """Break *iterable* into sub-iterables with *n* elements each. 
:func:`ichunked` is like :func:`chunked`, but it yields iterables @@ -3347,20 +3306,39 @@ def ichunked(iterable, n): [8, 9, 10, 11] """ - source = iter(iterable) - + source = peekable(iter(iterable)) + ichunk_marker = object() while True: # Check to see whether we're at the end of the source iterable - item = next(source, _marker) - if item is _marker: + item = source.peek(ichunk_marker) + if item is ichunk_marker: return - # Clone the source and yield an n-length slice - source, it = tee(chain([item], source)) - yield islice(it, n) + chunk = _IChunk(source, n) + yield chunk - # Advance the source iterable - consume(source, n) + # Advance the source iterable and fill previous chunk's cache + chunk.fill_cache() + + +def iequals(*iterables): + """Return ``True`` if all given *iterables* are equal to each other, + which means that they contain the same elements in the same order. + + The function is useful for comparing iterables of different data types + or iterables that do not support equality checks. + + >>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc")) + True + + >>> iequals("abc", "acb") + False + + Not to be confused with :func:`all_equals`, which checks whether all + elements of iterable are equal to each other. + + """ + return all(map(all_equal, zip_longest(*iterables, fillvalue=object()))) def distinct_combinations(iterable, r): @@ -3655,7 +3633,9 @@ class callback_iter: self._aborted = False self._future = None self._wait_seconds = wait_seconds - self._executor = __import__("concurrent.futures").futures.ThreadPoolExecutor(max_workers=1) + # Lazily import concurrent.future + self._executor = __import__( + ).futures.__import__("concurrent.futures").futures.ThreadPoolExecutor(max_workers=1) self._iterator = self._reader() def __enter__(self): @@ -3960,7 +3940,7 @@ def combination_index(element, iterable): n, _ = last(pool, default=(n, None)) - # Python versiosn below 3.8 don't have math.comb + # Python versions below 3.8 don't have math.comb index = 1 for i, j in enumerate(reversed(indexes), start=1): j = n - j @@ -4113,7 +4093,7 @@ def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False): If the *strict* keyword argument is ``True``, then ``UnequalIterablesError`` will be raised if any of the iterables have - different lengthss. + different lengths. """ def is_scalar(obj): @@ -4314,3 +4294,53 @@ def minmax(iterable_or_value, *others, key=None, default=_marker): hi, hi_key = y, y_key return lo, hi + + +def constrained_batches( + iterable, max_size, max_count=None, get_len=len, strict=True +): + """Yield batches of items from *iterable* with a combined size limited by + *max_size*. + + >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1'] + >>> list(constrained_batches(iterable, 10)) + [(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')] + + If a *max_count* is supplied, the number of items per batch is also + limited: + + >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1'] + >>> list(constrained_batches(iterable, 10, max_count = 2)) + [(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)] + + If a *get_len* function is supplied, use that instead of :func:`len` to + determine item size. + + If *strict* is ``True``, raise ``ValueError`` if any single item is bigger + than *max_size*. Otherwise, allow single items to exceed *max_size*. 
+ """ + if max_size <= 0: + raise ValueError('maximum size must be greater than zero') + + batch = [] + batch_size = 0 + batch_count = 0 + for item in iterable: + item_len = get_len(item) + if strict and item_len > max_size: + raise ValueError('item size exceeds maximum size') + + reached_count = batch_count == max_count + reached_size = item_len + batch_size > max_size + if batch_count and (reached_size or reached_count): + yield tuple(batch) + batch.clear() + batch_size = 0 + batch_count = 0 + + batch.append(item) + batch_size += item_len + batch_count += 1 + + if batch: + yield tuple(batch) diff --git a/lib/pkg_resources/_vendor/more_itertools/more.pyi b/lib/pkg_resources/_vendor/more_itertools/more.pyi index fe7d4bdd..1413fae7 100644 --- a/lib/pkg_resources/_vendor/more_itertools/more.pyi +++ b/lib/pkg_resources/_vendor/more_itertools/more.pyi @@ -72,7 +72,6 @@ class peekable(Generic[_T], Iterator[_T]): @overload def __getitem__(self, index: slice) -> List[_T]: ... -def collate(*iterables: Iterable[_T], **kwargs: Any) -> Iterable[_T]: ... def consumer(func: _GenFn) -> _GenFn: ... def ilen(iterable: Iterable[object]) -> int: ... def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ... @@ -179,7 +178,7 @@ def padded( iterable: Iterable[_T], *, n: Optional[int] = ..., - next_multiple: bool = ... + next_multiple: bool = ..., ) -> Iterator[Optional[_T]]: ... @overload def padded( @@ -225,7 +224,7 @@ def zip_equal( __iter1: Iterable[_T], __iter2: Iterable[_T], __iter3: Iterable[_T], - *iterables: Iterable[_T] + *iterables: Iterable[_T], ) -> Iterator[Tuple[_T, ...]]: ... @overload def zip_offset( @@ -233,7 +232,7 @@ def zip_offset( *, offsets: _SizedIterable[int], longest: bool = ..., - fillvalue: None = None + fillvalue: None = None, ) -> Iterator[Tuple[Optional[_T1]]]: ... @overload def zip_offset( @@ -242,7 +241,7 @@ def zip_offset( *, offsets: _SizedIterable[int], longest: bool = ..., - fillvalue: None = None + fillvalue: None = None, ) -> Iterator[Tuple[Optional[_T1], Optional[_T2]]]: ... @overload def zip_offset( @@ -252,7 +251,7 @@ def zip_offset( *iterables: Iterable[_T], offsets: _SizedIterable[int], longest: bool = ..., - fillvalue: None = None + fillvalue: None = None, ) -> Iterator[Tuple[Optional[_T], ...]]: ... @overload def zip_offset( @@ -420,7 +419,7 @@ def difference( iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, - initial: None = ... + initial: None = ..., ) -> Iterator[Union[_T, _U]]: ... @overload def difference( @@ -529,12 +528,12 @@ def distinct_combinations( def filter_except( validator: Callable[[Any], object], iterable: Iterable[_T], - *exceptions: Type[BaseException] + *exceptions: Type[BaseException], ) -> Iterator[_T]: ... def map_except( function: Callable[[Any], _U], iterable: Iterable[_T], - *exceptions: Type[BaseException] + *exceptions: Type[BaseException], ) -> Iterator[_U]: ... def map_if( iterable: Iterable[Any], @@ -610,7 +609,7 @@ def zip_broadcast( scalar_types: Union[ type, Tuple[Union[type, Tuple[Any, ...]], ...], None ] = ..., - strict: bool = ... + strict: bool = ..., ) -> Iterable[Tuple[_T, ...]]: ... def unique_in_window( iterable: Iterable[_T], n: int, key: Optional[Callable[[_T], _U]] = ... @@ -640,7 +639,7 @@ def minmax( iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None, - default: _U + default: _U, ) -> Union[_U, Tuple[_SupportsLessThanT, _SupportsLessThanT]]: ... 
@overload def minmax( @@ -653,12 +652,23 @@ def minmax( def minmax( iterable_or_value: _SupportsLessThanT, __other: _SupportsLessThanT, - *others: _SupportsLessThanT + *others: _SupportsLessThanT, ) -> Tuple[_SupportsLessThanT, _SupportsLessThanT]: ... @overload def minmax( iterable_or_value: _T, __other: _T, *others: _T, - key: Callable[[_T], _SupportsLessThan] + key: Callable[[_T], _SupportsLessThan], ) -> Tuple[_T, _T]: ... +def longest_common_prefix( + iterables: Iterable[Iterable[_T]], +) -> Iterator[_T]: ... +def iequals(*iterables: Iterable[object]) -> bool: ... +def constrained_batches( + iterable: Iterable[object], + max_size: int, + max_count: Optional[int] = ..., + get_len: Callable[[_T], object] = ..., + strict: bool = ..., +) -> Iterator[Tuple[_T]]: ... diff --git a/lib/pkg_resources/_vendor/more_itertools/recipes.py b/lib/pkg_resources/_vendor/more_itertools/recipes.py index a2596423..85796207 100644 --- a/lib/pkg_resources/_vendor/more_itertools/recipes.py +++ b/lib/pkg_resources/_vendor/more_itertools/recipes.py @@ -7,11 +7,16 @@ Some backward-compatible usability improvements have been made. .. [1] http://docs.python.org/library/itertools.html#recipes """ -import warnings +import math +import operator + from collections import deque +from collections.abc import Sized +from functools import reduce from itertools import ( chain, combinations, + compress, count, cycle, groupby, @@ -21,11 +26,11 @@ from itertools import ( tee, zip_longest, ) -import operator from random import randrange, sample, choice __all__ = [ 'all_equal', + 'batched', 'before_and_after', 'consume', 'convolve', @@ -41,6 +46,7 @@ __all__ = [ 'pad_none', 'pairwise', 'partition', + 'polynomial_from_roots', 'powerset', 'prepend', 'quantify', @@ -50,7 +56,9 @@ __all__ = [ 'random_product', 'repeatfunc', 'roundrobin', + 'sieve', 'sliding_window', + 'subslices', 'tabulate', 'tail', 'take', @@ -59,6 +67,8 @@ __all__ = [ 'unique_justseen', ] +_marker = object() + def take(n, iterable): """Return first *n* items of the iterable as a list. @@ -102,7 +112,14 @@ def tail(n, iterable): ['E', 'F', 'G'] """ - return iter(deque(iterable, maxlen=n)) + # If the given iterable has a length, then we can use islice to get its + # final elements. Note that if the iterable is not actually Iterable, + # either islice or deque will throw a TypeError. This is why we don't + # check if it is Iterable. + if isinstance(iterable, Sized): + yield from islice(iterable, max(0, len(iterable) - n), None) + else: + yield from iter(deque(iterable, maxlen=n)) def consume(iterator, n=None): @@ -284,20 +301,83 @@ else: pairwise.__doc__ = _pairwise.__doc__ -def grouper(iterable, n, fillvalue=None): - """Collect data into fixed-length chunks or blocks. +class UnequalIterablesError(ValueError): + def __init__(self, details=None): + msg = 'Iterables have different lengths' + if details is not None: + msg += (': index 0 has length {}; index {} has length {}').format( + *details + ) - >>> list(grouper('ABCDEFG', 3, 'x')) + super().__init__(msg) + + +def _zip_equal_generator(iterables): + for combo in zip_longest(*iterables, fillvalue=_marker): + for val in combo: + if val is _marker: + raise UnequalIterablesError() + yield combo + + +def _zip_equal(*iterables): + # Check whether the iterables are all the same size. + try: + first_size = len(iterables[0]) + for i, it in enumerate(iterables[1:], 1): + size = len(it) + if size != first_size: + break + else: + # If we didn't break out, we can use the built-in zip. 
+ return zip(*iterables) + + # If we did break out, there was a mismatch. + raise UnequalIterablesError(details=(first_size, i, size)) + # If any one of the iterables didn't have a length, start reading + # them until one runs out. + except TypeError: + return _zip_equal_generator(iterables) + + +def grouper(iterable, n, incomplete='fill', fillvalue=None): + """Group elements from *iterable* into fixed-length groups of length *n*. + + >>> list(grouper('ABCDEF', 3)) + [('A', 'B', 'C'), ('D', 'E', 'F')] + + The keyword arguments *incomplete* and *fillvalue* control what happens for + iterables whose length is not a multiple of *n*. + + When *incomplete* is `'fill'`, the last group will contain instances of + *fillvalue*. + + >>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] + When *incomplete* is `'ignore'`, the last group will not be emitted. + + >>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x')) + [('A', 'B', 'C'), ('D', 'E', 'F')] + + When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised. + + >>> it = grouper('ABCDEFG', 3, incomplete='strict') + >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + UnequalIterablesError + """ - if isinstance(iterable, int): - warnings.warn( - "grouper expects iterable as first parameter", DeprecationWarning - ) - n, iterable = iterable, n args = [iter(iterable)] * n - return zip_longest(fillvalue=fillvalue, *args) + if incomplete == 'fill': + return zip_longest(*args, fillvalue=fillvalue) + if incomplete == 'strict': + return _zip_equal(*args) + if incomplete == 'ignore': + return zip(*args) + else: + raise ValueError('Expected fill, strict, or ignore') def roundrobin(*iterables): @@ -658,11 +738,12 @@ def before_and_after(predicate, it): transition.append(elem) return - def remainder_iterator(): - yield from transition - yield from it + # Note: this is different from itertools recipes to allow nesting + # before_and_after remainders into before_and_after again. See tests + # for an example. + remainder_iterator = chain(transition, it) - return true_iterator(), remainder_iterator() + return true_iterator(), remainder_iterator def triplewise(iterable): @@ -696,3 +777,65 @@ def sliding_window(iterable, n): for x in it: window.append(x) yield tuple(window) + + +def subslices(iterable): + """Return all contiguous non-empty subslices of *iterable*. + + >>> list(subslices('ABC')) + [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']] + + This is similar to :func:`substrings`, but emits items in a different + order. + """ + seq = list(iterable) + slices = starmap(slice, combinations(range(len(seq) + 1), 2)) + return map(operator.getitem, repeat(seq), slices) + + +def polynomial_from_roots(roots): + """Compute a polynomial's coefficients from its roots. + + >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3) + >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60 + [1, -4, -17, 60] + """ + # Use math.prod for Python 3.8+, + prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1)) + roots = list(map(operator.neg, roots)) + return [ + sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1) + ] + + +def sieve(n): + """Yield the primes less than n. 
+ + >>> list(sieve(30)) + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + """ + isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x))) + limit = isqrt(n) + 1 + data = bytearray([1]) * n + data[:2] = 0, 0 + for p in compress(range(limit), data): + data[p + p : n : p] = bytearray(len(range(p + p, n, p))) + + return compress(count(), data) + + +def batched(iterable, n): + """Batch data into lists of length *n*. The last batch may be shorter. + + >>> list(batched('ABCDEFG', 3)) + [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] + + This recipe is from the ``itertools`` docs. This library also provides + :func:`chunked`, which has a different implementation. + """ + it = iter(iterable) + while True: + batch = list(islice(it, n)) + if not batch: + break + yield batch diff --git a/lib/pkg_resources/_vendor/more_itertools/recipes.pyi b/lib/pkg_resources/_vendor/more_itertools/recipes.pyi index 4648a41b..29415c5a 100644 --- a/lib/pkg_resources/_vendor/more_itertools/recipes.pyi +++ b/lib/pkg_resources/_vendor/more_itertools/recipes.pyi @@ -6,6 +6,7 @@ from typing import ( Iterator, List, Optional, + Sequence, Tuple, TypeVar, Union, @@ -39,21 +40,11 @@ def repeatfunc( func: Callable[..., _U], times: Optional[int] = ..., *args: Any ) -> Iterator[_U]: ... def pairwise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T]]: ... -@overload def grouper( - iterable: Iterable[_T], n: int -) -> Iterator[Tuple[Optional[_T], ...]]: ... -@overload -def grouper( - iterable: Iterable[_T], n: int, fillvalue: _U -) -> Iterator[Tuple[Union[_T, _U], ...]]: ... -@overload -def grouper( # Deprecated interface - iterable: int, n: Iterable[_T] -) -> Iterator[Tuple[Optional[_T], ...]]: ... -@overload -def grouper( # Deprecated interface - iterable: int, n: Iterable[_T], fillvalue: _U + iterable: Iterable[_T], + n: int, + incomplete: str = ..., + fillvalue: _U = ..., ) -> Iterator[Tuple[Union[_T, _U], ...]]: ... def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ... def partition( @@ -110,3 +101,10 @@ def triplewise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T, _T]]: ... def sliding_window( iterable: Iterable[_T], n: int ) -> Iterator[Tuple[_T, ...]]: ... +def subslices(iterable: Iterable[_T]) -> Iterator[List[_T]]: ... +def polynomial_from_roots(roots: Sequence[int]) -> List[int]: ... +def sieve(n: int) -> Iterator[int]: ... +def batched( + iterable: Iterable[_T], + n: int, +) -> Iterator[List[_T]]: ... diff --git a/lib/pkg_resources/_vendor/packaging/__about__.py b/lib/pkg_resources/_vendor/packaging/__about__.py deleted file mode 100644 index 3551bc2d..00000000 --- a/lib/pkg_resources/_vendor/packaging/__about__.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
- -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "21.3" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD-2-Clause or Apache-2.0" -__copyright__ = "2014-2019 %s" % __author__ diff --git a/lib/pkg_resources/_vendor/packaging/__init__.py b/lib/pkg_resources/_vendor/packaging/__init__.py index 3c50c5dc..4112fec0 100644 --- a/lib/pkg_resources/_vendor/packaging/__init__.py +++ b/lib/pkg_resources/_vendor/packaging/__init__.py @@ -2,24 +2,14 @@ # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from .__about__ import ( - __author__, - __copyright__, - __email__, - __license__, - __summary__, - __title__, - __uri__, - __version__, -) +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] +__version__ = "23.0" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014-2019 %s" % __author__ diff --git a/lib/pkg_resources/_vendor/packaging/_elffile.py b/lib/pkg_resources/_vendor/packaging/_elffile.py new file mode 100644 index 00000000..6fb19b30 --- /dev/null +++ b/lib/pkg_resources/_vendor/packaging/_elffile.py @@ -0,0 +1,108 @@ +""" +ELF file parser. + +This provides a class ``ELFFile`` that parses an ELF executable in a similar +interface to ``ZipFile``. Only the read interface is implemented. + +Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca +ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html +""" + +import enum +import os +import struct +from typing import IO, Optional, Tuple + + +class ELFInvalid(ValueError): + pass + + +class EIClass(enum.IntEnum): + C32 = 1 + C64 = 2 + + +class EIData(enum.IntEnum): + Lsb = 1 + Msb = 2 + + +class EMachine(enum.IntEnum): + I386 = 3 + S390 = 22 + Arm = 40 + X8664 = 62 + AArc64 = 183 + + +class ELFFile: + """ + Representation of an ELF executable. + """ + + def __init__(self, f: IO[bytes]) -> None: + self._f = f + + try: + ident = self._read("16B") + except struct.error: + raise ELFInvalid("unable to parse identification") + magic = bytes(ident[:4]) + if magic != b"\x7fELF": + raise ELFInvalid(f"invalid magic: {magic!r}") + + self.capacity = ident[4] # Format for program header (bitness). + self.encoding = ident[5] # Data structure encoding (endianness). + + try: + # e_fmt: Format for program header. + # p_fmt: Format for section header. + # p_idx: Indexes to find p_type, p_offset, and p_filesz. + e_fmt, self._p_fmt, self._p_idx = { + (1, 1): ("HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB. + (2, 1): ("HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB. + }[(self.capacity, self.encoding)] + except KeyError: + raise ELFInvalid( + f"unrecognized capacity ({self.capacity}) or " + f"encoding ({self.encoding})" + ) + + try: + ( + _, + self.machine, # Architecture type. + _, + _, + self._e_phoff, # Offset of program header. + _, + self.flags, # Processor-specific flags. + _, + self._e_phentsize, # Size of section. 
+ self._e_phnum, # Number of sections. + ) = self._read(e_fmt) + except struct.error as e: + raise ELFInvalid("unable to parse machine and section information") from e + + def _read(self, fmt: str) -> Tuple[int, ...]: + return struct.unpack(fmt, self._f.read(struct.calcsize(fmt))) + + @property + def interpreter(self) -> Optional[str]: + """ + The path recorded in the ``PT_INTERP`` section header. + """ + for index in range(self._e_phnum): + self._f.seek(self._e_phoff + self._e_phentsize * index) + try: + data = self._read(self._p_fmt) + except struct.error: + continue + if data[self._p_idx[0]] != 3: # Not PT_INTERP. + continue + self._f.seek(data[self._p_idx[1]]) + return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") + return None diff --git a/lib/pkg_resources/_vendor/packaging/_manylinux.py b/lib/pkg_resources/_vendor/packaging/_manylinux.py index 4c379aa6..2f0cc743 100644 --- a/lib/pkg_resources/_vendor/packaging/_manylinux.py +++ b/lib/pkg_resources/_vendor/packaging/_manylinux.py @@ -1,121 +1,58 @@ import collections +import contextlib import functools import os import re -import struct import sys import warnings -from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple +from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple + +from ._elffile import EIClass, EIData, ELFFile, EMachine + +EF_ARM_ABIMASK = 0xFF000000 +EF_ARM_ABI_VER5 = 0x05000000 +EF_ARM_ABI_FLOAT_HARD = 0x00000400 -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader: - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. - """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file: IO[bytes]) -> None: - def unpack(fmt: str) -> int: - try: - data = file.read(struct.calcsize(fmt)) - result: Tuple[int, ...] 
= struct.unpack(fmt, data) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result[0] - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header() -> Optional[_ELFFileHeader]: +@contextlib.contextmanager +def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]: try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header + with open(path, "rb") as f: + yield ELFFile(f) + except (OSError, TypeError, ValueError): + yield None -def _is_linux_armhf() -> bool: +def _is_linux_armhf(executable: str) -> bool: # hard-float ABI can be detected from the ELF header of the running # process # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.Arm + and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 + and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD + ) -def _is_linux_i686() -> bool: - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result +def _is_linux_i686(executable: str) -> bool: + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.I386 + ) -def _have_compatible_abi(arch: str) -> bool: +def _have_compatible_abi(executable: str, arch: str) -> bool: if arch == "armv7l": - return _is_linux_armhf() + return _is_linux_armhf(executable) if arch == "i686": - return _is_linux_i686() + return _is_linux_i686(executable) return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} @@ -141,10 +78,10 @@ def 
_glibc_version_string_confstr() -> Optional[str]: # platform module. # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". - version_string = os.confstr("CS_GNU_LIBC_VERSION") + # Should be a string like "glibc 2.17". + version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION") assert version_string is not None - _, version = version_string.split() + _, version = version_string.rsplit() except (AssertionError, AttributeError, OSError, ValueError): # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... return None @@ -211,8 +148,8 @@ def _parse_glibc_version(version_str: str) -> Tuple[int, int]: m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) if not m: warnings.warn( - "Expected glibc version with 2 components major.minor," - " got: %s" % version_str, + f"Expected glibc version with 2 components major.minor," + f" got: {version_str}", RuntimeWarning, ) return -1, -1 @@ -265,7 +202,7 @@ _LEGACY_MANYLINUX_MAP = { def platform_tags(linux: str, arch: str) -> Iterator[str]: - if not _have_compatible_abi(arch): + if not _have_compatible_abi(sys.executable, arch): return # Oldest glibc to be supported regardless of architecture is (2, 17). too_old_glibc2 = _GLibCVersion(2, 16) diff --git a/lib/pkg_resources/_vendor/packaging/_musllinux.py b/lib/pkg_resources/_vendor/packaging/_musllinux.py index 8ac3059b..706ba600 100644 --- a/lib/pkg_resources/_vendor/packaging/_musllinux.py +++ b/lib/pkg_resources/_vendor/packaging/_musllinux.py @@ -4,68 +4,13 @@ This module implements logic to detect if the currently running Python is linked against musl, and what musl version is used. """ -import contextlib import functools -import operator -import os import re -import struct import subprocess import sys -from typing import IO, Iterator, NamedTuple, Optional, Tuple +from typing import Iterator, NamedTuple, Optional - -def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: - return struct.unpack(fmt, f.read(struct.calcsize(fmt))) - - -def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: - """Detect musl libc location by parsing the Python executable. - - Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca - ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html - """ - f.seek(0) - try: - ident = _read_unpacked(f, "16B") - except struct.error: - return None - if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. - return None - f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. - - try: - # e_fmt: Format for program header. - # p_fmt: Format for section header. - # p_idx: Indexes to find p_type, p_offset, and p_filesz. - e_fmt, p_fmt, p_idx = { - 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. - 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. - }[ident[4]] - except KeyError: - return None - else: - p_get = operator.itemgetter(*p_idx) - - # Find the interpreter section and return its content. - try: - _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) - except struct.error: - return None - for i in range(e_phnum + 1): - f.seek(e_phoff + e_phentsize * i) - try: - p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) - except struct.error: - return None - if p_type != 3: # Not PT_INTERP. 
- continue - f.seek(p_offset) - interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") - if "musl" not in interpreter: - return None - return interpreter - return None +from ._elffile import ELFFile class _MuslVersion(NamedTuple): @@ -95,13 +40,12 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]: Version 1.2.2 Dynamic Program Loader """ - with contextlib.ExitStack() as stack: - try: - f = stack.enter_context(open(executable, "rb")) - except OSError: - return None - ld = _parse_ld_musl_from_elf(f) - if not ld: + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: return None proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) return _parse_musl_version(proc.stderr) diff --git a/lib/pkg_resources/_vendor/packaging/_parser.py b/lib/pkg_resources/_vendor/packaging/_parser.py new file mode 100644 index 00000000..2bc6a8f9 --- /dev/null +++ b/lib/pkg_resources/_vendor/packaging/_parser.py @@ -0,0 +1,328 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains ENBF-inspired grammar representing +the implementation. +""" + +import ast +from typing import Any, List, NamedTuple, Optional, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]] +# MarkerList = List[Union["MarkerList", MarkerAtom, str]] +# mypy does not support recursive type definition +# https://github.com/python/mypy/issues/731 +MarkerAtom = Any +MarkerList = List[Any] + + +class ParsedRequirement(NamedTuple): + name: str + url: str + extras: List[str] + specifier: str + marker: Optional[MarkerList] + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> Tuple[str, str, Optional[MarkerList]]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? 
+ """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? + """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> List[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? + """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens("LEFT_BRACKET", "RIGHT_BRACKET"): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: List[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? 
+ """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? + """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? + """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if ( + env_var == "platform_python_implementation" + or env_var == "python_implementation" + ): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/lib/pkg_resources/_vendor/packaging/_tokenizer.py b/lib/pkg_resources/_vendor/packaging/_tokenizer.py new file mode 100644 index 00000000..b1fb207c --- /dev/null +++ b/lib/pkg_resources/_vendor/packaging/_tokenizer.py @@ -0,0 +1,188 @@ +import contextlib +import re +from dataclasses import 
dataclass +from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union + +from .specifiers import Specifier + + +@dataclass +class Token: + name: str + text: str + position: int + + +class ParserSyntaxError(Exception): + """The provided source text could not be parsed correctly.""" + + def __init__( + self, + message: str, + *, + source: str, + span: Tuple[int, int], + ) -> None: + self.span = span + self.message = message + self.source = source + + super().__init__() + + def __str__(self) -> str: + marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" + return "\n ".join([self.message, self.source, marker]) + + +DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = { + "LEFT_PARENTHESIS": r"\(", + "RIGHT_PARENTHESIS": r"\)", + "LEFT_BRACKET": r"\[", + "RIGHT_BRACKET": r"\]", + "SEMICOLON": r";", + "COMMA": r",", + "QUOTED_STRING": re.compile( + r""" + ( + ('[^']*') + | + ("[^"]*") + ) + """, + re.VERBOSE, + ), + "OP": r"(===|==|~=|!=|<=|>=|<|>)", + "BOOLOP": r"\b(or|and)\b", + "IN": r"\bin\b", + "NOT": r"\bnot\b", + "VARIABLE": re.compile( + r""" + \b( + python_version + |python_full_version + |os[._]name + |sys[._]platform + |platform_(release|system) + |platform[._](version|machine|python_implementation) + |python_implementation + |implementation_(name|version) + |extra + )\b + """, + re.VERBOSE, + ), + "SPECIFIER": re.compile( + Specifier._operator_regex_str + Specifier._version_regex_str, + re.VERBOSE | re.IGNORECASE, + ), + "AT": r"\@", + "URL": r"[^ \t]+", + "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", + "WS": r"[ \t]+", + "END": r"$", +} + + +class Tokenizer: + """Context-sensitive token parsing. + + Provides methods to examine the input stream to check whether the next token + matches. + """ + + def __init__( + self, + source: str, + *, + rules: "Dict[str, Union[str, re.Pattern[str]]]", + ) -> None: + self.source = source + self.rules: Dict[str, re.Pattern[str]] = { + name: re.compile(pattern) for name, pattern in rules.items() + } + self.next_token: Optional[Token] = None + self.position = 0 + + def consume(self, name: str) -> None: + """Move beyond provided token name, if at current position.""" + if self.check(name): + self.read() + + def check(self, name: str, *, peek: bool = False) -> bool: + """Check whether the next token has the provided name. + + By default, if the check succeeds, the token *must* be read before + another check. If `peek` is set to `True`, the token is not loaded and + would need to be checked again. + """ + assert ( + self.next_token is None + ), f"Cannot check for {name!r}, already have {self.next_token!r}" + assert name in self.rules, f"Unknown token name: {name!r}" + + expression = self.rules[name] + + match = expression.match(self.source, self.position) + if match is None: + return False + if not peek: + self.next_token = Token(name, match[0], self.position) + return True + + def expect(self, name: str, *, expected: str) -> Token: + """Expect a certain token name next, failing with a syntax error otherwise. + + The token is *not* read. 
+ """ + if not self.check(name): + raise self.raise_syntax_error(f"Expected {expected}") + return self.read() + + def read(self) -> Token: + """Consume the next token and return it.""" + token = self.next_token + assert token is not None + + self.position += len(token.text) + self.next_token = None + + return token + + def raise_syntax_error( + self, + message: str, + *, + span_start: Optional[int] = None, + span_end: Optional[int] = None, + ) -> NoReturn: + """Raise ParserSyntaxError at the given position.""" + span = ( + self.position if span_start is None else span_start, + self.position if span_end is None else span_end, + ) + raise ParserSyntaxError( + message, + source=self.source, + span=span, + ) + + @contextlib.contextmanager + def enclosing_tokens(self, open_token: str, close_token: str) -> Iterator[bool]: + if self.check(open_token): + open_position = self.position + self.read() + else: + open_position = None + + yield open_position is not None + + if open_position is None: + return + + if not self.check(close_token): + self.raise_syntax_error( + f"Expected closing {close_token}", + span_start=open_position, + ) + + self.read() diff --git a/lib/pkg_resources/_vendor/packaging/markers.py b/lib/pkg_resources/_vendor/packaging/markers.py index 18769b09..68369c98 100644 --- a/lib/pkg_resources/_vendor/packaging/markers.py +++ b/lib/pkg_resources/_vendor/packaging/markers.py @@ -8,19 +8,10 @@ import platform import sys from typing import Any, Callable, Dict, List, Optional, Tuple, Union -from pkg_resources.extern.pyparsing import ( # noqa: N817 - Forward, - Group, - Literal as L, - ParseException, - ParseResults, - QuotedString, - ZeroOrMore, - stringEnd, - stringStart, -) - +from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker +from ._tokenizer import ParserSyntaxError from .specifiers import InvalidSpecifier, Specifier +from .utils import canonicalize_name __all__ = [ "InvalidMarker", @@ -52,101 +43,24 @@ class UndefinedEnvironmentName(ValueError): """ -class Node: - def __init__(self, value: Any) -> None: - self.value = value - - def __str__(self) -> str: - return str(self.value) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}('{self}')>" - - def serialize(self) -> str: - raise NotImplementedError - - -class Variable(Node): - def serialize(self) -> str: - return str(self) - - -class Value(Node): - def serialize(self) -> str: - return f'"{self}"' - - -class Op(Node): - def serialize(self) -> str: - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") # PEP-345 - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # undocumented setuptools legacy - | L("extra") # PEP-508 -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | 
L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results +def _normalize_extra_values(results: Any) -> Any: + """ + Normalize extra values. + """ + if isinstance(results[0], tuple): + lhs, op, rhs = results[0] + if isinstance(lhs, Variable) and lhs.value == "extra": + normalized_extra = canonicalize_name(rhs.value) + rhs = Value(normalized_extra) + elif isinstance(rhs, Variable) and rhs.value == "extra": + normalized_extra = canonicalize_name(lhs.value) + lhs = Value(normalized_extra) + results[0] = lhs, op, rhs + return results def _format_marker( - marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True + marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True ) -> str: assert isinstance(marker, (list, tuple, str)) @@ -192,7 +106,7 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool: except InvalidSpecifier: pass else: - return spec.contains(lhs) + return spec.contains(lhs, prereleases=True) oper: Optional[Operator] = _operators.get(op.serialize()) if oper is None: @@ -201,25 +115,19 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool: return oper(lhs, rhs) -class Undefined: - pass +def _normalize(*values: str, key: str) -> Tuple[str, ...]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + return tuple(canonicalize_name(v) for v in values) + + # other environment markers don't have such standards + return values -_undefined = Undefined() - - -def _get_env(environment: Dict[str, str], name: str) -> str: - value: Union[str, Undefined] = environment.get(name, _undefined) - - if isinstance(value, Undefined): - raise UndefinedEnvironmentName( - f"{name!r} does not exist in evaluation environment." 
- ) - - return value - - -def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: +def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool: groups: List[List[bool]] = [[]] for marker in markers: @@ -231,12 +139,15 @@ def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: lhs, op, rhs = marker if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) + environment_key = lhs.value + lhs_value = environment[environment_key] rhs_value = rhs.value else: lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) + environment_key = rhs.value + rhs_value = environment[environment_key] + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) groups[-1].append(_eval_op(lhs_value, op, rhs_value)) else: assert marker in ["and", "or"] @@ -274,13 +185,29 @@ def default_environment() -> Dict[str, str]: class Marker: def __init__(self, marker: str) -> None: + # Note: We create a Marker object without calling this constructor in + # packaging.requirements.Requirement. If any additional logic is + # added here, make sure to mirror/adapt Requirement. try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - raise InvalidMarker( - f"Invalid marker: {marker!r}, parse error at " - f"{marker[e.loc : e.loc + 8]!r}" - ) + self._markers = _normalize_extra_values(parse_marker(marker)) + # The attribute `_markers` can be described in terms of a recursive type: + # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]] + # + # For example, the following expression: + # python_version > "3.6" or (python_version == "3.6" and os_name == "unix") + # + # is parsed into: + # [ + # (, ')>, ), + # 'and', + # [ + # (, , ), + # 'or', + # (, , ) + # ] + # ] + except ParserSyntaxError as e: + raise InvalidMarker(str(e)) from e def __str__(self) -> str: return _format_marker(self._markers) @@ -288,6 +215,15 @@ class Marker: def __repr__(self) -> str: return f"" + def __hash__(self) -> int: + return hash((self.__class__.__name__, str(self))) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Marker): + return NotImplemented + + return str(self) == str(other) + def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: """Evaluate a marker. @@ -298,7 +234,12 @@ class Marker: The environment is determined from the current Python process. """ current_environment = default_environment() + current_environment["extra"] = "" if environment is not None: current_environment.update(environment) + # The API used to allow setting extra to None. We need to handle this + # case for backwards compatibility. + if current_environment["extra"] is None: + current_environment["extra"] = "" return _evaluate_markers(self._markers, current_environment) diff --git a/lib/pkg_resources/_vendor/packaging/requirements.py b/lib/pkg_resources/_vendor/packaging/requirements.py index 6af14ec4..a9f9b9c7 100644 --- a/lib/pkg_resources/_vendor/packaging/requirements.py +++ b/lib/pkg_resources/_vendor/packaging/requirements.py @@ -2,26 +2,13 @@ # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
-import re -import string import urllib.parse -from typing import List, Optional as TOptional, Set +from typing import Any, List, Optional, Set -from pkg_resources.extern.pyparsing import ( # noqa - Combine, - Literal as L, - Optional, - ParseException, - Regex, - Word, - ZeroOrMore, - originalTextFor, - stringEnd, - stringStart, -) - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet +from ._parser import parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet class InvalidRequirement(ValueError): @@ -30,60 +17,6 @@ class InvalidRequirement(ValueError): """ -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r"[^ ]+")("url") -URL = AT + URI - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine( - VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False -)("_raw_spec") -_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start : t._original_end]) -) -MARKER_SEPARATOR = SEMICOLON -MARKER = MARKER_SEPARATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd -# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see -# issue #104 -REQUIREMENT.parseString("x[]") - - class Requirement: """Parse a requirement. 
@@ -99,28 +32,29 @@ class Requirement: def __init__(self, requirement_string: str) -> None: try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' - ) + parsed = parse_requirement(requirement_string) + except ParserSyntaxError as e: + raise InvalidRequirement(str(e)) from e - self.name: str = req.name - if req.url: - parsed_url = urllib.parse.urlparse(req.url) + self.name: str = parsed.name + if parsed.url: + parsed_url = urllib.parse.urlparse(parsed.url) if parsed_url.scheme == "file": - if urllib.parse.urlunparse(parsed_url) != req.url: + if urllib.parse.urlunparse(parsed_url) != parsed.url: raise InvalidRequirement("Invalid URL given") elif not (parsed_url.scheme and parsed_url.netloc) or ( not parsed_url.scheme and not parsed_url.netloc ): - raise InvalidRequirement(f"Invalid URL: {req.url}") - self.url: TOptional[str] = req.url + raise InvalidRequirement(f"Invalid URL: {parsed.url}") + self.url: Optional[str] = parsed.url else: self.url = None - self.extras: Set[str] = set(req.extras.asList() if req.extras else []) - self.specifier: SpecifierSet = SpecifierSet(req.specifier) - self.marker: TOptional[Marker] = req.marker if req.marker else None + self.extras: Set[str] = set(parsed.extras if parsed.extras else []) + self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) + self.marker: Optional[Marker] = None + if parsed.marker is not None: + self.marker = Marker.__new__(Marker) + self.marker._markers = _normalize_extra_values(parsed.marker) def __str__(self) -> str: parts: List[str] = [self.name] @@ -144,3 +78,18 @@ class Requirement: def __repr__(self) -> str: return f"" + + def __hash__(self) -> int: + return hash((self.__class__.__name__, str(self))) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Requirement): + return NotImplemented + + return ( + self.name == other.name + and self.extras == other.extras + and self.specifier == other.specifier + and self.url == other.url + and self.marker == other.marker + ) diff --git a/lib/pkg_resources/_vendor/packaging/specifiers.py b/lib/pkg_resources/_vendor/packaging/specifiers.py index 0e218a6f..e715ecc8 100644 --- a/lib/pkg_resources/_vendor/packaging/specifiers.py +++ b/lib/pkg_resources/_vendor/packaging/specifiers.py @@ -1,20 +1,22 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. +""" +.. 
testsetup:: + + from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier + from packaging.version import Version +""" import abc -import functools import itertools import re -import warnings from typing import ( Callable, - Dict, Iterable, Iterator, List, Optional, - Pattern, Set, Tuple, TypeVar, @@ -22,17 +24,28 @@ from typing import ( ) from .utils import canonicalize_version -from .version import LegacyVersion, Version, parse +from .version import Version -ParsedVersion = Union[Version, LegacyVersion] -UnparsedVersion = Union[Version, LegacyVersion, str] -VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) -CallableOperator = Callable[[ParsedVersion, str], bool] +UnparsedVersion = Union[Version, str] +UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) +CallableOperator = Callable[[Version, str], bool] + + +def _coerce_version(version: UnparsedVersion) -> Version: + if not isinstance(version, Version): + version = Version(version) + return version class InvalidSpecifier(ValueError): """ - An invalid specifier was found, users should refer to PEP 440. + Raised when attempting to create a :class:`Specifier` with a specifier + string that is invalid. + + >>> Specifier("lolwat") + Traceback (most recent call last): + ... + packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' """ @@ -40,35 +53,39 @@ class BaseSpecifier(metaclass=abc.ABCMeta): @abc.abstractmethod def __str__(self) -> str: """ - Returns the str representation of this Specifier like object. This + Returns the str representation of this Specifier-like object. This should be representative of the Specifier itself. """ @abc.abstractmethod def __hash__(self) -> int: """ - Returns a hash value for this Specifier like object. + Returns a hash value for this Specifier-like object. """ @abc.abstractmethod def __eq__(self, other: object) -> bool: """ - Returns a boolean representing whether or not the two Specifier like + Returns a boolean representing whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. """ - @abc.abstractproperty + @property + @abc.abstractmethod def prereleases(self) -> Optional[bool]: - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. + """Whether or not pre-releases as a whole are allowed. + + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. """ @prereleases.setter def prereleases(self, value: bool) -> None: - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. + """Setter for :attr:`prereleases`. + + :param value: The value to set. """ @abc.abstractmethod @@ -79,227 +96,28 @@ class BaseSpecifier(metaclass=abc.ABCMeta): @abc.abstractmethod def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. """ -class _IndividualSpecifier(BaseSpecifier): +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. - _operators: Dict[str, str] = {} - _regex: Pattern[str] + .. 
tip:: - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + It is generally not required to instantiate this manually. You should instead + prefer to work with :class:`SpecifierSet` instead, which can parse + comma-separated version specifiers (which is what package metadata contains). + """ - self._spec: Tuple[str, str] = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return f"<{self.__class__.__name__}({str(self)!r}{pre})>" - - def __str__(self) -> str: - return "{}{}".format(*self._spec) - - @property - def _canonical_spec(self) -> Tuple[str, str]: - return self._spec[0], canonicalize_version(self._spec[1]) - - def __hash__(self) -> int: - return hash(self._canonical_spec) - - def __eq__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._canonical_spec == other._canonical_spec - - def _get_operator(self, op: str) -> CallableOperator: - operator_callable: CallableOperator = getattr( - self, f"_compare_{self._operators[op]}" - ) - return operator_callable - - def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self) -> str: - return self._spec[0] - - @property - def version(self) -> str: - return self._spec[1] - - @property - def prereleases(self) -> Optional[bool]: - return self._prereleases - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: str) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Determine if prereleases are to be allowed or not. - if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - normalized_item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if normalized_item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. - operator_callable: CallableOperator = self._get_operator(self.operator) - return operator_callable(normalized_item, self.version) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. 
- for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later in case nothing - # else matches this specifier. - if parsed_version.is_prerelease and not ( - prereleases or self.prereleases - ): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the beginning. - else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. - if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = r""" - (?P(==|!=|<=|>=|<|>)) - \s* - (?P - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. - ) - """ - - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - super().__init__(spec, prereleases) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion: - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal( - self, prospective: LegacyVersion, spec: str - ) -> bool: - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective > self._coerce_version(spec) - - -def _require_version_compare( - fn: Callable[["Specifier", ParsedVersion, str], bool] -) -> Callable[["Specifier", ParsedVersion, str], bool]: - @functools.wraps(fn) - def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool: - if not isinstance(prospective, Version): - return False - return fn(self, prospective, spec) - - return wrapped - - -class Specifier(_IndividualSpecifier): - - _regex_str = r""" + _operator_regex_str = r""" (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" (?P (?: # The identity operators allow for an escape hatch that will @@ -309,8 +127,10 @@ class Specifier(_IndividualSpecifier): # but included entirely as an escape hatch. 
(?<====) # Only match for the identity operator \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. + [^\s;)]* # The arbitrary version can be just about anything, + # we match everything except for whitespace, a + # semi-colon for marker support, and a closing paren + # since versions can be enclosed in them. ) | (?: @@ -323,23 +143,23 @@ class Specifier(_IndividualSpecifier): v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. + # You cannot use a wild card and a pre-release, post-release, a dev or + # local version together so group them with a | and make them optional. (?: + \.\* # Wild card syntax of .* + | + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* )? ) | @@ -354,7 +174,7 @@ class Specifier(_IndividualSpecifier): [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) + (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? @@ -379,7 +199,7 @@ class Specifier(_IndividualSpecifier): [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) + (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? @@ -391,7 +211,10 @@ class Specifier(_IndividualSpecifier): ) """ - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + _regex = re.compile( + r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$", + re.VERBOSE | re.IGNORECASE, + ) _operators = { "~=": "compatible", @@ -404,8 +227,152 @@ class Specifier(_IndividualSpecifier): "===": "arbitrary", } - @_require_version_compare - def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + @property + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. 
+ operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. + + >>> Specifier('>=1.0.0') + =1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") + True + >>> (Specifier("==1.2.3", prereleases=False) == + ... Specifier("==1.2.3", prereleases=True)) + True + >>> Specifier("==1.2.3") == "==1.2.3" + True + >>> Specifier("==1.2.3") == Specifier("==1.2.4") + False + >>> Specifier("==1.2.3") == Specifier("~=1.2.3") + False + """ + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _compare_compatible(self, prospective: Version, spec: str) -> bool: # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to @@ -426,34 +393,33 @@ class Specifier(_IndividualSpecifier): prospective, prefix ) - @_require_version_compare - def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: + def _compare_equal(self, prospective: Version, spec: str) -> bool: # We need special logic to handle prefix matching if spec.endswith(".*"): # In the case of prefix matching we want to ignore local segment. 
- prospective = Version(prospective.public) + normalized_prospective = canonicalize_version(prospective.public) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment. - split_spec = _version_split(spec[:-2]) # Remove the trailing .* + split_spec = _version_split(normalized_spec) # Split the prospective version out by dots, and pretend that there # is an implicit dot in between a release segment and a pre-release # segment. - split_prospective = _version_split(str(prospective)) + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. - shortened_prospective = split_prospective[: len(split_spec)] + shortened_prospective = padded_prospective[: len(split_spec)] - # Pad out our two sides with zeros so that they both equal the same - # length. - padded_spec, padded_prospective = _pad_version( - split_spec, shortened_prospective - ) - - return padded_prospective == padded_spec + return shortened_prospective == split_spec else: # Convert our spec string into a Version spec_version = Version(spec) @@ -466,30 +432,24 @@ class Specifier(_IndividualSpecifier): return prospective == spec_version - @_require_version_compare - def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool: + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: return not self._compare_equal(prospective, spec) - @_require_version_compare - def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool: + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) <= Version(spec) - @_require_version_compare - def _compare_greater_than_equal( - self, prospective: ParsedVersion, spec: str - ) -> bool: + def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) >= Version(spec) - @_require_version_compare - def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. @@ -514,8 +474,7 @@ class Specifier(_IndividualSpecifier): # version in the spec. return True - @_require_version_compare - def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. 
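
A short sketch (again using the standalone `packaging` names that this vendored copy mirrors) of the prefix matching handled here and of the new `strip_trailing_zero` keyword it relies on:

from packaging.specifiers import Specifier
from packaging.utils import canonicalize_version

print(Specifier("==1.2.*").contains("1.2.5"))   # True: "1.2" is a prefix of "1.2.5"
print(Specifier("==1.2.*").contains("1.3.0"))   # False

# canonicalize_version() normally drops trailing zeros; the prefix-match code
# keeps them on the spec side so "==1.2.0.*" still has three components to compare.
print(canonicalize_version("1.2.0"))                             # "1.2"
print(canonicalize_version("1.2.0", strip_trailing_zero=False))  # "1.2.0"
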
@@ -549,34 +508,133 @@ class Specifier(_IndividualSpecifier): def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: return str(prospective).lower() == str(spec).lower() - @property - def prereleases(self) -> bool: + def __contains__(self, item: Union[str, Version]) -> bool: + """Return whether or not the item is contained in this specifier. - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. - if self._prereleases is not None: - return self._prereleases + :param item: The item to check for. - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. - if operator == "==" and version.endswith(".*"): - version = version[:-2] + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. - if parse(version).is_prerelease: - return True + >>> "1.2.3" in Specifier(">=1.2.3") + True + >>> Version("1.2.3") in Specifier(">=1.2.3") + True + >>> "1.0.0" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True) + True + """ + return self.contains(item) - return False + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + """Return whether or not the item is contained in this specifier. - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this Specifier. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> Specifier(">=1.2.3").contains("1.2.3") + True + >>> Specifier(">=1.2.3").contains(Version("1.2.3")) + True + >>> Specifier(">=1.2.3").contains("1.0.0") + False + >>> Specifier(">=1.2.3").contains("1.3.0a1") + False + >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1") + True + >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True) + True + """ + + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version, this allows us to have a shortcut for + # "2.0" in Specifier(">=2") + normalized_item = _coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifier. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. 
+ The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). + + This method is smarter than just ``filter(Specifier().contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")])) + ['1.2.3', '1.3', ] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"])) + ['1.5a1'] + >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + """ + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = _coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") @@ -618,22 +676,39 @@ def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str class SpecifierSet(BaseSpecifier): + """This class abstracts handling of a set of version specifiers. + + It can be passed a single specifier (``>=3.0``), a comma-separated list of + specifiers (``>=3.0,!=3.1``), or no specifier at all. + """ + def __init__( self, specifiers: str = "", prereleases: Optional[bool] = None ) -> None: + """Initialize a SpecifierSet instance. - # Split on , to break each individual specifier into it's own item, and + :param specifiers: + The string representation of a specifier or a comma-separated list of + specifiers which will be parsed and normalized before use. + :param prereleases: + This tells the SpecifierSet if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + + :raises InvalidSpecifier: + If the given ``specifiers`` are not parseable than this exception will be + raised. + """ + + # Split on `,` to break each individual specifier into it's own item, and # strip each item to remove leading/trailing whitespace. split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed: Set[_IndividualSpecifier] = set() + # Specifier. 
+ parsed: Set[Specifier] = set() for specifier in split_specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) + parsed.add(Specifier(specifier)) # Turn our parsed specifiers into a frozen set and save them for later. self._specs = frozenset(parsed) @@ -642,7 +717,40 @@ class SpecifierSet(BaseSpecifier): # we accept prereleases or not. self._prereleases = prereleases + @property + def prereleases(self) -> Optional[bool]: + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + def __repr__(self) -> str: + """A representation of the specifier set that shows all internal state. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> SpecifierSet('>=1.0.0,!=2.0.0') + =1.0.0')> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ pre = ( f", prereleases={self.prereleases!r}" if self._prereleases is not None @@ -652,12 +760,31 @@ class SpecifierSet(BaseSpecifier): return f"" def __str__(self) -> str: + """A string representation of the specifier set that can be round-tripped. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> str(SpecifierSet(">=1.0.0,!=1.0.1")) + '!=1.0.1,>=1.0.0' + >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False)) + '!=1.0.1,>=1.0.0' + """ return ",".join(sorted(str(s) for s in self._specs)) def __hash__(self) -> int: return hash(self._specs) def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": + """Return a SpecifierSet which is a combination of the two sets. + + :param other: The other object to combine with. + + >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1' + =1.0.0')> + >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1') + =1.0.0')> + """ if isinstance(other, str): other = SpecifierSet(other) elif not isinstance(other, SpecifierSet): @@ -681,7 +808,25 @@ class SpecifierSet(BaseSpecifier): return specifier def __eq__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): + """Whether or not the two SpecifierSet-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) == + ... 
SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1" + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2") + False + """ + if isinstance(other, (str, Specifier)): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented @@ -689,43 +834,72 @@ class SpecifierSet(BaseSpecifier): return self._specs == other._specs def __len__(self) -> int: + """Returns the number of specifiers in this specifier set.""" return len(self._specs) - def __iter__(self) -> Iterator[_IndividualSpecifier]: + def __iter__(self) -> Iterator[Specifier]: + """ + Returns an iterator over all the underlying :class:`Specifier` instances + in this specifier set. + + >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str) + [, =1.0.0')>] + """ return iter(self._specs) - @property - def prereleases(self) -> Optional[bool]: - - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - def __contains__(self, item: UnparsedVersion) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: The item to check for. + + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. + + >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True) + True + """ return self.contains(item) def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None + self, + item: UnparsedVersion, + prereleases: Optional[bool] = None, + installed: Optional[bool] = None, ) -> bool: + """Return whether or not the item is contained in this SpecifierSet. - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this SpecifierSet. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3")) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True) + True + """ + # Ensure that our item is a Version instance. 
+ if not isinstance(item, Version): + item = Version(item) # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the @@ -742,6 +916,9 @@ class SpecifierSet(BaseSpecifier): if not prereleases and item.is_prerelease: return False + if installed and item.is_prerelease: + item = Version(item.base_version) + # We simply dispatch to the underlying specs here to make sure that the # given version is contained within all of them. # Note: This use of all() here means that an empty set of specifiers @@ -749,9 +926,46 @@ class SpecifierSet(BaseSpecifier): return all(s.contains(item, prereleases=prereleases) for s in self._specs) def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifiers in this set. + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). + + This method is smarter than just ``filter(SpecifierSet(...).contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")])) + ['1.3', ] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"])) + [] + >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + + An "empty" SpecifierSet will filter items based on the presence of prerelease + versions in the set. + + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet("").filter(["1.5a1"])) + ['1.5a1'] + >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + """ # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. @@ -764,27 +978,16 @@ class SpecifierSet(BaseSpecifier): if self._specs: for spec in self._specs: iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable + return iter(iterable) # If we do not have any specifiers, then we need to have a rough filter # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. + # releases. else: - filtered: List[VersionTypeVar] = [] - found_prereleases: List[VersionTypeVar] = [] - - item: UnparsedVersion - parsed_version: Union[Version, LegacyVersion] + filtered: List[UnparsedVersionVar] = [] + found_prereleases: List[UnparsedVersionVar] = [] for item in iterable: - # Ensure that we some kind of Version class for this item. 
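
The `installed` flag handled just above is easy to miss, since the docstring does not mention it. A hedged sketch of its effect as implemented in this hunk (behaviour inferred from the code shown; values are illustrative):

from packaging.specifiers import SpecifierSet

specs = SpecifierSet("==2.0")
# A pre-release does not normally satisfy "==2.0", even with prereleases allowed.
print(specs.contains("2.0rc1", prereleases=True))                  # False
# With installed=True the pre-release is compared by its base version (2.0),
# so an already-installed release candidate can still count as matching.
print(specs.contains("2.0rc1", prereleases=True, installed=True))  # True
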
- if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue + parsed_version = _coerce_version(item) # Store any item which is a pre-release for later unless we've # already found a final version or we are accepting prereleases @@ -797,6 +1000,6 @@ class SpecifierSet(BaseSpecifier): # If we've found no items except for pre-releases, then we'll go # ahead and use the pre-releases if not filtered and found_prereleases and prereleases is None: - return found_prereleases + return iter(found_prereleases) - return filtered + return iter(filtered) diff --git a/lib/pkg_resources/_vendor/packaging/tags.py b/lib/pkg_resources/_vendor/packaging/tags.py index 9a3d25a7..19ccbde3 100644 --- a/lib/pkg_resources/_vendor/packaging/tags.py +++ b/lib/pkg_resources/_vendor/packaging/tags.py @@ -4,6 +4,7 @@ import logging import platform +import subprocess import sys import sysconfig from importlib.machinery import EXTENSION_SUFFIXES @@ -36,7 +37,7 @@ INTERPRETER_SHORT_NAMES: Dict[str, str] = { } -_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 +_32_BIT_INTERPRETER = sys.maxsize <= 2**32 class Tag: @@ -224,10 +225,45 @@ def cpython_tags( yield Tag(interpreter, "abi3", platform_) -def _generic_abi() -> Iterator[str]: - abi = sysconfig.get_config_var("SOABI") - if abi: - yield _normalize_string(abi) +def _generic_abi() -> List[str]: + """ + Return the ABI tag based on EXT_SUFFIX. + """ + # The following are examples of `EXT_SUFFIX`. + # We want to keep the parts which are related to the ABI and remove the + # parts which are related to the platform: + # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 + # - mac: '.cpython-310-darwin.so' => cp310 + # - win: '.cp310-win_amd64.pyd' => cp310 + # - win: '.pyd' => cp37 (uses _cpython_abis()) + # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 + # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' + # => graalpy_38_native + + ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) + if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": + raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") + parts = ext_suffix.split(".") + if len(parts) < 3: + # CPython3.7 and earlier uses ".pyd" on Windows. + return _cpython_abis(sys.version_info[:2]) + soabi = parts[1] + if soabi.startswith("cpython"): + # non-windows + abi = "cp" + soabi.split("-")[1] + elif soabi.startswith("cp"): + # windows + abi = soabi.split("-")[0] + elif soabi.startswith("pypy"): + abi = "-".join(soabi.split("-")[:2]) + elif soabi.startswith("graalpy"): + abi = "-".join(soabi.split("-")[:3]) + elif soabi: + # pyston, ironpython, others? + abi = soabi + else: + return [] + return [_normalize_string(abi)] def generic_tags( @@ -251,8 +287,9 @@ def generic_tags( interpreter = "".join([interp_name, interp_version]) if abis is None: abis = _generic_abi() + else: + abis = list(abis) platforms = list(platforms or platform_tags()) - abis = list(abis) if "none" not in abis: abis.append("none") for abi in abis: @@ -356,6 +393,22 @@ def mac_platforms( version_str, _, cpu_arch = platform.mac_ver() if version is None: version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + if version == (10, 16): + # When built against an older macOS SDK, Python will report macOS 10.16 + # instead of the real version. 
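
To make the EXT_SUFFIX mapping above concrete, here is a standalone sketch that applies the same branching to the example suffixes from the comment. The helper name and the hard-coded "cp37" fallback are illustrative only; the real `_generic_abi()` defers to `_cpython_abis()` for that case.

def abi_from_ext_suffix(ext_suffix: str) -> str:
    # Mirrors the branches in _generic_abi(); the final replace() mimics
    # _normalize_string(), which maps ".", "-" and " " to "_".
    parts = ext_suffix.split(".")
    if len(parts) < 3:                       # ".pyd" on CPython <= 3.7 / Windows
        return "cp37"                        # real code: _cpython_abis(sys.version_info[:2])
    soabi = parts[1]
    if soabi.startswith("cpython"):          # linux / mac CPython
        abi = "cp" + soabi.split("-")[1]
    elif soabi.startswith("cp"):             # windows CPython
        abi = soabi.split("-")[0]
    elif soabi.startswith("pypy"):
        abi = "-".join(soabi.split("-")[:2])
    else:
        abi = soabi
    return abi.replace(".", "_").replace("-", "_")

print(abi_from_ext_suffix(".cpython-310-x86_64-linux-gnu.so"))   # cp310
print(abi_from_ext_suffix(".cp310-win_amd64.pyd"))               # cp310
print(abi_from_ext_suffix(".pypy38-pp73-x86_64-linux-gnu.so"))   # pypy38_pp73
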
+ version_str = subprocess.run( + [ + sys.executable, + "-sS", + "-c", + "import platform; print(platform.mac_ver()[0])", + ], + check=True, + env={"SYSTEM_VERSION_COMPAT": "0"}, + stdout=subprocess.PIPE, + universal_newlines=True, + ).stdout + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) else: version = version if arch is None: @@ -446,6 +499,9 @@ def platform_tags() -> Iterator[str]: def interpreter_name() -> str: """ Returns the name of the running interpreter. + + Some implementations have a reserved, two-letter abbreviation which will + be returned when appropriate. """ name = sys.implementation.name return INTERPRETER_SHORT_NAMES.get(name) or name @@ -482,6 +538,9 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]: yield from generic_tags() if interp_name == "pp": - yield from compatible_tags(interpreter="pp3") + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) else: - yield from compatible_tags() + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/lib/pkg_resources/_vendor/packaging/utils.py b/lib/pkg_resources/_vendor/packaging/utils.py index bab11b80..33c613b7 100644 --- a/lib/pkg_resources/_vendor/packaging/utils.py +++ b/lib/pkg_resources/_vendor/packaging/utils.py @@ -35,7 +35,9 @@ def canonicalize_name(name: str) -> NormalizedName: return cast(NormalizedName, value) -def canonicalize_version(version: Union[Version, str]) -> str: +def canonicalize_version( + version: Union[Version, str], *, strip_trailing_zero: bool = True +) -> str: """ This is very similar to Version.__str__, but has one subtle difference with the way it handles the release segment. @@ -56,8 +58,11 @@ def canonicalize_version(version: Union[Version, str]) -> str: parts.append(f"{parsed.epoch}!") # Release segment - # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) + release_segment = ".".join(str(x) for x in parsed.release) + if strip_trailing_zero: + # NB: This strips trailing '.0's to normalize + release_segment = re.sub(r"(\.0)+$", "", release_segment) + parts.append(release_segment) # Pre-release if parsed.pre is not None: diff --git a/lib/pkg_resources/_vendor/packaging/version.py b/lib/pkg_resources/_vendor/packaging/version.py index de9a09a4..e5c738cf 100644 --- a/lib/pkg_resources/_vendor/packaging/version.py +++ b/lib/pkg_resources/_vendor/packaging/version.py @@ -1,16 +1,20 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. +""" +.. 
testsetup:: + + from packaging.version import parse, Version +""" import collections import itertools import re -import warnings -from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union +from typing import Callable, Optional, SupportsInt, Tuple, Union from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType -__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] +__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"] InfiniteTypes = Union[InfinityType, NegativeInfinityType] PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] @@ -29,36 +33,37 @@ LocalType = Union[ CmpKey = Tuple[ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType ] -LegacyCmpKey = Tuple[int, Tuple[str, ...]] -VersionComparisonMethod = Callable[ - [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool -] +VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] _Version = collections.namedtuple( "_Version", ["epoch", "release", "dev", "pre", "post", "local"] ) -def parse(version: str) -> Union["LegacyVersion", "Version"]: +def parse(version: str) -> "Version": + """Parse the given version string. + + >>> parse('1.0.dev1') + + + :param version: The version string to parse. + :raises InvalidVersion: When the version string is not a valid version. """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. - """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) + return Version(version) class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. + """Raised when a version string is not a valid version. + + >>> Version("invalid") + Traceback (most recent call last): + ... 
+ packaging.version.InvalidVersion: Invalid version: 'invalid' """ class _BaseVersion: - _key: Union[CmpKey, LegacyCmpKey] + _key: CmpKey def __hash__(self) -> int: return hash(self._key) @@ -103,126 +108,9 @@ class _BaseVersion: return self._key != other._key -class LegacyVersion(_BaseVersion): - def __init__(self, version: str) -> None: - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def __str__(self) -> str: - return self._version - - def __repr__(self) -> str: - return f"" - - @property - def public(self) -> str: - return self._version - - @property - def base_version(self) -> str: - return self._version - - @property - def epoch(self) -> int: - return -1 - - @property - def release(self) -> None: - return None - - @property - def pre(self) -> None: - return None - - @property - def post(self) -> None: - return None - - @property - def dev(self) -> None: - return None - - @property - def local(self) -> None: - return None - - @property - def is_prerelease(self) -> bool: - return False - - @property - def is_postrelease(self) -> bool: - return False - - @property - def is_devrelease(self) -> bool: - return False - - -_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) - -_legacy_version_replacement_map = { - "pre": "c", - "preview": "c", - "-": "final-", - "rc": "c", - "dev": "@", -} - - -def _parse_version_parts(s: str) -> Iterator[str]: - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version: str) -> LegacyCmpKey: - - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. - parts: List[str] = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - - return epoch, tuple(parts) - - # Deliberately not anchored to the start and end of the string, to make it # easier for 3rd party code to reuse -VERSION_PATTERN = r""" +_VERSION_PATTERN = r""" v? (?: (?:(?P[0-9]+)!)? # epoch @@ -253,12 +141,55 @@ VERSION_PATTERN = r""" (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version """ +VERSION_PATTERN = _VERSION_PATTERN +""" +A string containing the regular expression used to match a valid version. + +The pattern is not anchored at either end, and is intended for embedding in larger +expressions (for example, matching a version number as part of a file name). The +regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE`` +flags set. 
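
A brief sketch of the embedding use case this docstring describes, compiling VERSION_PATTERN into a larger expression (the project and file name are made-up examples):

import re
from packaging.version import VERSION_PATTERN, Version

# VERSION_PATTERN is unanchored, so it can be embedded in a bigger pattern.
filename_re = re.compile(
    r"^myproject-(?P<version>" + VERSION_PATTERN + r")\.tar\.gz$",
    re.VERBOSE | re.IGNORECASE,
)

match = filename_re.match("myproject-1.2.3rc1.tar.gz")
if match:
    print(Version(match.group("version")))   # 1.2.3rc1
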
+ +:meta hide-value: +""" + class Version(_BaseVersion): + """This class abstracts handling of a project's versions. + + A :class:`Version` instance is comparison aware and can be compared and + sorted using the standard Python interfaces. + + >>> v1 = Version("1.0a5") + >>> v2 = Version("1.0") + >>> v1 + + >>> v2 + + >>> v1 < v2 + True + >>> v1 == v2 + False + >>> v1 > v2 + False + >>> v1 >= v2 + False + >>> v1 <= v2 + True + """ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) def __init__(self, version: str) -> None: + """Initialize a Version object. + + :param version: + The string representation of a version which will be parsed and normalized + before use. + :raises InvalidVersion: + If the ``version`` does not conform to PEP 440 in any way then this + exception will be raised. + """ # Validate the version and parse it into pieces match = self._regex.search(version) @@ -288,9 +219,19 @@ class Version(_BaseVersion): ) def __repr__(self) -> str: + """A representation of the Version that shows all internal state. + + >>> Version('1.0.0') + + """ return f"" def __str__(self) -> str: + """A string representation of the version that can be rounded-tripped. + + >>> str(Version("1.0a5")) + '1.0a5' + """ parts = [] # Epoch @@ -320,29 +261,80 @@ class Version(_BaseVersion): @property def epoch(self) -> int: + """The epoch of the version. + + >>> Version("2.0.0").epoch + 0 + >>> Version("1!2.0.0").epoch + 1 + """ _epoch: int = self._version.epoch return _epoch @property def release(self) -> Tuple[int, ...]: + """The components of the "release" segment of the version. + + >>> Version("1.2.3").release + (1, 2, 3) + >>> Version("2.0.0").release + (2, 0, 0) + >>> Version("1!2.0.0.post0").release + (2, 0, 0) + + Includes trailing zeroes but not the epoch or any pre-release / development / + post-release suffixes. + """ _release: Tuple[int, ...] = self._version.release return _release @property def pre(self) -> Optional[Tuple[str, int]]: + """The pre-release segment of the version. + + >>> print(Version("1.2.3").pre) + None + >>> Version("1.2.3a1").pre + ('a', 1) + >>> Version("1.2.3b1").pre + ('b', 1) + >>> Version("1.2.3rc1").pre + ('rc', 1) + """ _pre: Optional[Tuple[str, int]] = self._version.pre return _pre @property def post(self) -> Optional[int]: + """The post-release number of the version. + + >>> print(Version("1.2.3").post) + None + >>> Version("1.2.3.post1").post + 1 + """ return self._version.post[1] if self._version.post else None @property def dev(self) -> Optional[int]: + """The development number of the version. + + >>> print(Version("1.2.3").dev) + None + >>> Version("1.2.3.dev1").dev + 1 + """ return self._version.dev[1] if self._version.dev else None @property def local(self) -> Optional[str]: + """The local version segment of the version. + + >>> print(Version("1.2.3").local) + None + >>> Version("1.2.3+abc").local + 'abc' + """ if self._version.local: return ".".join(str(x) for x in self._version.local) else: @@ -350,10 +342,31 @@ class Version(_BaseVersion): @property def public(self) -> str: + """The public portion of the version. + + >>> Version("1.2.3").public + '1.2.3' + >>> Version("1.2.3+abc").public + '1.2.3' + >>> Version("1.2.3+abc.dev1").public + '1.2.3' + """ return str(self).split("+", 1)[0] @property def base_version(self) -> str: + """The "base version" of the version. 
+ + >>> Version("1.2.3").base_version + '1.2.3' + >>> Version("1.2.3+abc").base_version + '1.2.3' + >>> Version("1!1.2.3+abc.dev1").base_version + '1!1.2.3' + + The "base version" is the public version of the project without any pre or post + release markers. + """ parts = [] # Epoch @@ -367,26 +380,72 @@ class Version(_BaseVersion): @property def is_prerelease(self) -> bool: + """Whether this version is a pre-release. + + >>> Version("1.2.3").is_prerelease + False + >>> Version("1.2.3a1").is_prerelease + True + >>> Version("1.2.3b1").is_prerelease + True + >>> Version("1.2.3rc1").is_prerelease + True + >>> Version("1.2.3dev1").is_prerelease + True + """ return self.dev is not None or self.pre is not None @property def is_postrelease(self) -> bool: + """Whether this version is a post-release. + + >>> Version("1.2.3").is_postrelease + False + >>> Version("1.2.3.post1").is_postrelease + True + """ return self.post is not None @property def is_devrelease(self) -> bool: + """Whether this version is a development release. + + >>> Version("1.2.3").is_devrelease + False + >>> Version("1.2.3.dev1").is_devrelease + True + """ return self.dev is not None @property def major(self) -> int: + """The first item of :attr:`release` or ``0`` if unavailable. + + >>> Version("1.2.3").major + 1 + """ return self.release[0] if len(self.release) >= 1 else 0 @property def minor(self) -> int: + """The second item of :attr:`release` or ``0`` if unavailable. + + >>> Version("1.2.3").minor + 2 + >>> Version("1").minor + 0 + """ return self.release[1] if len(self.release) >= 2 else 0 @property def micro(self) -> int: + """The third item of :attr:`release` or ``0`` if unavailable. + + >>> Version("1.2.3").micro + 3 + >>> Version("1").micro + 0 + """ return self.release[2] if len(self.release) >= 3 else 0 diff --git a/lib/pkg_resources/_vendor/platformdirs/__init__.py b/lib/pkg_resources/_vendor/platformdirs/__init__.py new file mode 100644 index 00000000..aef2821b --- /dev/null +++ b/lib/pkg_resources/_vendor/platformdirs/__init__.py @@ -0,0 +1,342 @@ +""" +Utilities for determining application-specific dirs. See for details and +usage. +""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +if sys.version_info >= (3, 8): # pragma: no cover (py38+) + from typing import Literal +else: # pragma: no cover (py38+) + from ..typing_extensions import Literal + +from .api import PlatformDirsABC +from .version import __version__ +from .version import __version_tuple__ as __version_info__ + + +def _set_platform_dir_class() -> type[PlatformDirsABC]: + if sys.platform == "win32": + from .windows import Windows as Result + elif sys.platform == "darwin": + from .macos import MacOS as Result + else: + from .unix import Unix as Result + + if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system": + + if os.getenv("SHELL") or os.getenv("PREFIX"): + return Result + + from .android import _android_folder + + if _android_folder() is not None: + from .android import Android + + return Android # return to avoid redefinition of result + + return Result + + +PlatformDirs = _set_platform_dir_class() #: Currently active platform +AppDirs = PlatformDirs #: Backwards compatibility with appdirs + + +def user_data_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. 
+ :param roaming: See `roaming `. + :returns: data directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir + + +def site_data_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `roaming `. + :returns: data directory shared by users + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir + + +def user_config_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: config directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir + + +def site_config_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `roaming `. + :returns: config directory shared by the users + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir + + +def user_cache_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `roaming `. + :returns: cache directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir + + +def user_state_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: state directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir + + +def user_log_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `roaming `. + :returns: log directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir + + +def user_documents_dir() -> str: + """ + :returns: documents directory tied to the user + """ + return PlatformDirs().user_documents_dir + + +def user_runtime_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `opinion `. 
+ :returns: runtime directory tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir + + +def user_data_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: data path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path + + +def site_data_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `multipath `. + :returns: data path shared by users + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path + + +def user_config_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: config path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path + + +def site_config_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `roaming `. + :returns: config path shared by the users + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path + + +def user_cache_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `roaming `. + :returns: cache path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path + + +def user_state_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :returns: state path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path + + +def user_log_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `roaming `. 
+ :returns: log path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path + + +def user_documents_path() -> Path: + """ + :returns: documents path tied to the user + """ + return PlatformDirs().user_documents_path + + +def user_runtime_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `opinion `. + :returns: runtime path tied to the user + """ + return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path + + +__all__ = [ + "__version__", + "__version_info__", + "PlatformDirs", + "AppDirs", + "PlatformDirsABC", + "user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "user_documents_dir", + "user_runtime_dir", + "site_data_dir", + "site_config_dir", + "user_data_path", + "user_config_path", + "user_cache_path", + "user_state_path", + "user_log_path", + "user_documents_path", + "user_runtime_path", + "site_data_path", + "site_config_path", +] diff --git a/lib/pkg_resources/_vendor/platformdirs/__main__.py b/lib/pkg_resources/_vendor/platformdirs/__main__.py new file mode 100644 index 00000000..0fc1edd5 --- /dev/null +++ b/lib/pkg_resources/_vendor/platformdirs/__main__.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from platformdirs import PlatformDirs, __version__ + +PROPS = ( + "user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "user_documents_dir", + "user_runtime_dir", + "site_data_dir", + "site_config_dir", +) + + +def main() -> None: + app_name = "MyApp" + app_author = "MyCompany" + + print(f"-- platformdirs {__version__} --") + + print("-- app dirs (with optional 'version')") + dirs = PlatformDirs(app_name, app_author, version="1.0") + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") + + print("\n-- app dirs (without optional 'version')") + dirs = PlatformDirs(app_name, app_author) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") + + print("\n-- app dirs (without optional 'appauthor')") + dirs = PlatformDirs(app_name) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") + + print("\n-- app dirs (with disabled 'appauthor')") + dirs = PlatformDirs(app_name, appauthor=False) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") + + +if __name__ == "__main__": + main() diff --git a/lib/pkg_resources/_vendor/platformdirs/android.py b/lib/pkg_resources/_vendor/platformdirs/android.py new file mode 100644 index 00000000..eda80935 --- /dev/null +++ b/lib/pkg_resources/_vendor/platformdirs/android.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +import os +import re +import sys +from functools import lru_cache +from typing import cast + +from .api import PlatformDirsABC + + +class Android(PlatformDirsABC): + """ + Follows the guidance `from here `_. Makes use of the + `appname ` and + `version `. + """ + + @property + def user_data_dir(self) -> str: + """:return: data directory tied to the user, e.g. 
``/data/user///files/``""" + return self._append_app_name_and_version(cast(str, _android_folder()), "files") + + @property + def site_data_dir(self) -> str: + """:return: data directory shared by users, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_config_dir(self) -> str: + """ + :return: config directory tied to the user, e.g. ``/data/user///shared_prefs/`` + """ + return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs") + + @property + def site_config_dir(self) -> str: + """:return: config directory shared by the users, same as `user_config_dir`""" + return self.user_config_dir + + @property + def user_cache_dir(self) -> str: + """:return: cache directory tied to the user, e.g. e.g. ``/data/user///cache/``""" + return self._append_app_name_and_version(cast(str, _android_folder()), "cache") + + @property + def user_state_dir(self) -> str: + """:return: state directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_log_dir(self) -> str: + """ + :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it, + e.g. ``/data/user///cache//log`` + """ + path = self.user_cache_dir + if self.opinion: + path = os.path.join(path, "log") + return path + + @property + def user_documents_dir(self) -> str: + """ + :return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents`` + """ + return _android_documents_folder() + + @property + def user_runtime_dir(self) -> str: + """ + :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it, + e.g. ``/data/user///cache//tmp`` + """ + path = self.user_cache_dir + if self.opinion: + path = os.path.join(path, "tmp") + return path + + +@lru_cache(maxsize=1) +def _android_folder() -> str | None: + """:return: base folder for the Android OS or None if cannot be found""" + try: + # First try to get path to android app via pyjnius + from jnius import autoclass + + Context = autoclass("android.content.Context") # noqa: N806 + result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath() + except Exception: + # if fails find an android folder looking path on the sys.path + pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files") + for path in sys.path: + if pattern.match(path): + result = path.split("/files")[0] + break + else: + result = None + return result + + +@lru_cache(maxsize=1) +def _android_documents_folder() -> str: + """:return: documents folder for the Android OS""" + # Get directories with pyjnius + try: + from jnius import autoclass + + Context = autoclass("android.content.Context") # noqa: N806 + Environment = autoclass("android.os.Environment") # noqa: N806 + documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath() + except Exception: + documents_dir = "/storage/emulated/0/Documents" + + return documents_dir + + +__all__ = [ + "Android", +] diff --git a/lib/pkg_resources/_vendor/platformdirs/api.py b/lib/pkg_resources/_vendor/platformdirs/api.py new file mode 100644 index 00000000..6f6e2c2c --- /dev/null +++ b/lib/pkg_resources/_vendor/platformdirs/api.py @@ -0,0 +1,156 @@ +from __future__ import annotations + +import os +import sys +from abc import ABC, abstractmethod +from pathlib import Path + +if sys.version_info >= (3, 8): # pragma: no branch + from typing import Literal # pragma: no cover + + +class PlatformDirsABC(ABC): + """ + Abstract base class for platform 
directories. + """ + + def __init__( + self, + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, + multipath: bool = False, + opinion: bool = True, + ): + """ + Create a new platform directory. + + :param appname: See `appname`. + :param appauthor: See `appauthor`. + :param version: See `version`. + :param roaming: See `roaming`. + :param multipath: See `multipath`. + :param opinion: See `opinion`. + """ + self.appname = appname #: The name of application. + self.appauthor = appauthor + """ + The name of the app author or distributing body for this application. Typically, it is the owning company name. + Defaults to `appname`. You may pass ``False`` to disable it. + """ + self.version = version + """ + An optional version path element to append to the path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this would typically be ``.``. + """ + self.roaming = roaming + """ + Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup + for roaming profiles, this user data will be synced on login (see + `here `_). + """ + self.multipath = multipath + """ + An optional parameter only applicable to Unix/Linux which indicates that the entire list of data dirs should be + returned. By default, the first item would only be returned. + """ + self.opinion = opinion #: A flag to indicating to use opinionated values. + + def _append_app_name_and_version(self, *base: str) -> str: + params = list(base[1:]) + if self.appname: + params.append(self.appname) + if self.version: + params.append(self.version) + return os.path.join(base[0], *params) + + @property + @abstractmethod + def user_data_dir(self) -> str: + """:return: data directory tied to the user""" + + @property + @abstractmethod + def site_data_dir(self) -> str: + """:return: data directory shared by users""" + + @property + @abstractmethod + def user_config_dir(self) -> str: + """:return: config directory tied to the user""" + + @property + @abstractmethod + def site_config_dir(self) -> str: + """:return: config directory shared by the users""" + + @property + @abstractmethod + def user_cache_dir(self) -> str: + """:return: cache directory tied to the user""" + + @property + @abstractmethod + def user_state_dir(self) -> str: + """:return: state directory tied to the user""" + + @property + @abstractmethod + def user_log_dir(self) -> str: + """:return: log directory tied to the user""" + + @property + @abstractmethod + def user_documents_dir(self) -> str: + """:return: documents directory tied to the user""" + + @property + @abstractmethod + def user_runtime_dir(self) -> str: + """:return: runtime directory tied to the user""" + + @property + def user_data_path(self) -> Path: + """:return: data path tied to the user""" + return Path(self.user_data_dir) + + @property + def site_data_path(self) -> Path: + """:return: data path shared by users""" + return Path(self.site_data_dir) + + @property + def user_config_path(self) -> Path: + """:return: config path tied to the user""" + return Path(self.user_config_dir) + + @property + def site_config_path(self) -> Path: + """:return: config path shared by the users""" + return Path(self.site_config_dir) + + @property + def user_cache_path(self) -> Path: + """:return: cache path tied to the user""" + return Path(self.user_cache_dir) + + @property + def user_state_path(self) -> Path: + """:return: state path tied to 
the user""" + return Path(self.user_state_dir) + + @property + def user_log_path(self) -> Path: + """:return: log path tied to the user""" + return Path(self.user_log_dir) + + @property + def user_documents_path(self) -> Path: + """:return: documents path tied to the user""" + return Path(self.user_documents_dir) + + @property + def user_runtime_path(self) -> Path: + """:return: runtime path tied to the user""" + return Path(self.user_runtime_dir) diff --git a/lib/pkg_resources/_vendor/platformdirs/macos.py b/lib/pkg_resources/_vendor/platformdirs/macos.py new file mode 100644 index 00000000..a01337c7 --- /dev/null +++ b/lib/pkg_resources/_vendor/platformdirs/macos.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import os + +from .api import PlatformDirsABC + + +class MacOS(PlatformDirsABC): + """ + Platform directories for the macOS operating system. Follows the guidance from `Apple documentation + `_. + Makes use of the `appname ` and + `version `. + """ + + @property + def user_data_dir(self) -> str: + """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/")) + + @property + def site_data_dir(self) -> str: + """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``""" + return self._append_app_name_and_version("/Library/Application Support") + + @property + def user_config_dir(self) -> str: + """:return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/")) + + @property + def site_config_dir(self) -> str: + """:return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``""" + return self._append_app_name_and_version("/Library/Preferences") + + @property + def user_cache_dir(self) -> str: + """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches")) + + @property + def user_state_dir(self) -> str: + """:return: state directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_log_dir(self) -> str: + """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs")) + + @property + def user_documents_dir(self) -> str: + """:return: documents directory tied to the user, e.g. ``~/Documents``""" + return os.path.expanduser("~/Documents") + + @property + def user_runtime_dir(self) -> str: + """:return: runtime directory tied to the user, e.g. 
``~/Library/Caches/TemporaryItems/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems")) + + +__all__ = [ + "MacOS", +] diff --git a/lib/pkg_resources/_vendor/pyparsing/py.typed b/lib/pkg_resources/_vendor/platformdirs/py.typed similarity index 100% rename from lib/pkg_resources/_vendor/pyparsing/py.typed rename to lib/pkg_resources/_vendor/platformdirs/py.typed diff --git a/lib/pkg_resources/_vendor/platformdirs/unix.py b/lib/pkg_resources/_vendor/platformdirs/unix.py new file mode 100644 index 00000000..9aca5a03 --- /dev/null +++ b/lib/pkg_resources/_vendor/platformdirs/unix.py @@ -0,0 +1,181 @@ +from __future__ import annotations + +import os +import sys +from configparser import ConfigParser +from pathlib import Path + +from .api import PlatformDirsABC + +if sys.platform.startswith("linux"): # pragma: no branch # no op check, only to please the type checker + from os import getuid +else: + + def getuid() -> int: + raise RuntimeError("should only be used on Linux") + + +class Unix(PlatformDirsABC): + """ + On Unix/Linux, we follow the + `XDG Basedir Spec `_. The spec allows + overriding directories with environment variables. The examples show are the default values, alongside the name of + the environment variable that overrides them. Makes use of the + `appname `, + `version `, + `multipath `, + `opinion `. + """ + + @property + def user_data_dir(self) -> str: + """ + :return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or + ``$XDG_DATA_HOME/$appname/$version`` + """ + path = os.environ.get("XDG_DATA_HOME", "") + if not path.strip(): + path = os.path.expanduser("~/.local/share") + return self._append_app_name_and_version(path) + + @property + def site_data_dir(self) -> str: + """ + :return: data directories shared by users (if `multipath ` is + enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS + path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version`` + """ + # XDG default for $XDG_DATA_DIRS; only first, if multipath is False + path = os.environ.get("XDG_DATA_DIRS", "") + if not path.strip(): + path = f"/usr/local/share{os.pathsep}/usr/share" + return self._with_multi_path(path) + + def _with_multi_path(self, path: str) -> str: + path_list = path.split(os.pathsep) + if not self.multipath: + path_list = path_list[0:1] + path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list] + return os.pathsep.join(path_list) + + @property + def user_config_dir(self) -> str: + """ + :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or + ``$XDG_CONFIG_HOME/$appname/$version`` + """ + path = os.environ.get("XDG_CONFIG_HOME", "") + if not path.strip(): + path = os.path.expanduser("~/.config") + return self._append_app_name_and_version(path) + + @property + def site_config_dir(self) -> str: + """ + :return: config directories shared by users (if `multipath ` + is enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS + path separator), e.g. ``/etc/xdg/$appname/$version`` + """ + # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False + path = os.environ.get("XDG_CONFIG_DIRS", "") + if not path.strip(): + path = "/etc/xdg" + return self._with_multi_path(path) + + @property + def user_cache_dir(self) -> str: + """ + :return: cache directory tied to the user, e.g. 
``~/.cache/$appname/$version`` or + ``~/$XDG_CACHE_HOME/$appname/$version`` + """ + path = os.environ.get("XDG_CACHE_HOME", "") + if not path.strip(): + path = os.path.expanduser("~/.cache") + return self._append_app_name_and_version(path) + + @property + def user_state_dir(self) -> str: + """ + :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or + ``$XDG_STATE_HOME/$appname/$version`` + """ + path = os.environ.get("XDG_STATE_HOME", "") + if not path.strip(): + path = os.path.expanduser("~/.local/state") + return self._append_app_name_and_version(path) + + @property + def user_log_dir(self) -> str: + """ + :return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it + """ + path = self.user_state_dir + if self.opinion: + path = os.path.join(path, "log") + return path + + @property + def user_documents_dir(self) -> str: + """ + :return: documents directory tied to the user, e.g. ``~/Documents`` + """ + documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR") + if documents_dir is None: + documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip() + if not documents_dir: + documents_dir = os.path.expanduser("~/Documents") + + return documents_dir + + @property + def user_runtime_dir(self) -> str: + """ + :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or + ``$XDG_RUNTIME_DIR/$appname/$version`` + """ + path = os.environ.get("XDG_RUNTIME_DIR", "") + if not path.strip(): + path = f"/run/user/{getuid()}" + return self._append_app_name_and_version(path) + + @property + def site_data_path(self) -> Path: + """:return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_data_dir) + + @property + def site_config_path(self) -> Path: + """:return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_config_dir) + + def _first_item_as_path_if_multipath(self, directory: str) -> Path: + if self.multipath: + # If multipath is True, the first path is returned. + directory = directory.split(os.pathsep)[0] + return Path(directory) + + +def _get_user_dirs_folder(key: str) -> str | None: + """Return directory from user-dirs.dirs config file. 
See https://freedesktop.org/wiki/Software/xdg-user-dirs/""" + user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs") + if os.path.exists(user_dirs_config_path): + parser = ConfigParser() + + with open(user_dirs_config_path) as stream: + # Add fake section header, so ConfigParser doesn't complain + parser.read_string(f"[top]\n{stream.read()}") + + if key not in parser["top"]: + return None + + path = parser["top"][key].strip('"') + # Handle relative home paths + path = path.replace("$HOME", os.path.expanduser("~")) + return path + + return None + + +__all__ = [ + "Unix", +] diff --git a/lib/pkg_resources/_vendor/platformdirs/version.py b/lib/pkg_resources/_vendor/platformdirs/version.py new file mode 100644 index 00000000..9f6eb98e --- /dev/null +++ b/lib/pkg_resources/_vendor/platformdirs/version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '2.6.2' +__version_tuple__ = version_tuple = (2, 6, 2) diff --git a/lib/pkg_resources/_vendor/platformdirs/windows.py b/lib/pkg_resources/_vendor/platformdirs/windows.py new file mode 100644 index 00000000..d5c27b34 --- /dev/null +++ b/lib/pkg_resources/_vendor/platformdirs/windows.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +import ctypes +import os +import sys +from functools import lru_cache +from typing import Callable + +from .api import PlatformDirsABC + + +class Windows(PlatformDirsABC): + """`MSDN on where to store app data files + `_. + Makes use of the + `appname `, + `appauthor `, + `version `, + `roaming `, + `opinion `.""" + + @property + def user_data_dir(self) -> str: + """ + :return: data directory tied to the user, e.g. + ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or + ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming) + """ + const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA" + path = os.path.normpath(get_win_folder(const)) + return self._append_parts(path) + + def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str: + params = [] + if self.appname: + if self.appauthor is not False: + author = self.appauthor or self.appname + params.append(author) + params.append(self.appname) + if opinion_value is not None and self.opinion: + params.append(opinion_value) + if self.version: + params.append(self.version) + return os.path.join(path, *params) + + @property + def site_data_dir(self) -> str: + """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``""" + path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) + return self._append_parts(path) + + @property + def user_config_dir(self) -> str: + """:return: config directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def site_config_dir(self) -> str: + """:return: config directory shared by the users, same as `site_data_dir`""" + return self.site_data_dir + + @property + def user_cache_dir(self) -> str: + """ + :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. 
+ ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version`` + """ + path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) + return self._append_parts(path, opinion_value="Cache") + + @property + def user_state_dir(self) -> str: + """:return: state directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_log_dir(self) -> str: + """ + :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it + """ + path = self.user_data_dir + if self.opinion: + path = os.path.join(path, "Logs") + return path + + @property + def user_documents_dir(self) -> str: + """ + :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents`` + """ + return os.path.normpath(get_win_folder("CSIDL_PERSONAL")) + + @property + def user_runtime_dir(self) -> str: + """ + :return: runtime directory tied to the user, e.g. + ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname`` + """ + path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp")) + return self._append_parts(path) + + +def get_win_folder_from_env_vars(csidl_name: str) -> str: + """Get folder from environment variables.""" + if csidl_name == "CSIDL_PERSONAL": # does not have an environment name + return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents") + + env_var_name = { + "CSIDL_APPDATA": "APPDATA", + "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE", + "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA", + }.get(csidl_name) + if env_var_name is None: + raise ValueError(f"Unknown CSIDL name: {csidl_name}") + result = os.environ.get(env_var_name) + if result is None: + raise ValueError(f"Unset environment variable: {env_var_name}") + return result + + +def get_win_folder_from_registry(csidl_name: str) -> str: + """Get folder from the registry. + + This is a fallback technique at best. I'm not sure if using the + registry for this guarantees us the correct answer for all CSIDL_* + names. + """ + shell_folder_name = { + "CSIDL_APPDATA": "AppData", + "CSIDL_COMMON_APPDATA": "Common AppData", + "CSIDL_LOCAL_APPDATA": "Local AppData", + "CSIDL_PERSONAL": "Personal", + }.get(csidl_name) + if shell_folder_name is None: + raise ValueError(f"Unknown CSIDL name: {csidl_name}") + if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows + raise NotImplementedError + import winreg + + key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders") + directory, _ = winreg.QueryValueEx(key, shell_folder_name) + return str(directory) + + +def get_win_folder_via_ctypes(csidl_name: str) -> str: + """Get folder with ctypes.""" + csidl_const = { + "CSIDL_APPDATA": 26, + "CSIDL_COMMON_APPDATA": 35, + "CSIDL_LOCAL_APPDATA": 28, + "CSIDL_PERSONAL": 5, + }.get(csidl_name) + if csidl_const is None: + raise ValueError(f"Unknown CSIDL name: {csidl_name}") + + buf = ctypes.create_unicode_buffer(1024) + windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker + windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) + + # Downgrade to short path name if it has highbit chars. 
+ if any(ord(c) > 255 for c in buf): + buf2 = ctypes.create_unicode_buffer(1024) + if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): + buf = buf2 + + return buf.value + + +def _pick_get_win_folder() -> Callable[[str], str]: + if hasattr(ctypes, "windll"): + return get_win_folder_via_ctypes + try: + import winreg # noqa: F401 + except ImportError: + return get_win_folder_from_env_vars + else: + return get_win_folder_from_registry + + +get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder()) + +__all__ = [ + "Windows", +] diff --git a/lib/pkg_resources/_vendor/pyparsing/__init__.py b/lib/pkg_resources/_vendor/pyparsing/__init__.py deleted file mode 100644 index 7802ff15..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/__init__.py +++ /dev/null @@ -1,331 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2022 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -__doc__ = """ -pyparsing module - Classes and methods to define and execute parsing grammars -============================================================================= - -The pyparsing module is an alternative approach to creating and -executing simple grammars, vs. the traditional lex/yacc approach, or the -use of regular expressions. With pyparsing, you don't need to learn -a new syntax for defining grammars or matching expressions - the parsing -module provides a library of classes that you use to construct the -grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form -``", !"``), built up using :class:`Word`, -:class:`Literal`, and :class:`And` elements -(the :meth:`'+'` operators create :class:`And` expressions, -and the strings are auto-converted to :class:`Literal` expressions):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word(alphas) + "," + Word(alphas) + "!" - - hello = "Hello, World!" - print(hello, "->", greet.parse_string(hello)) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the -self-explanatory class names, and the use of :class:`'+'`, -:class:`'|'`, :class:`'^'` and :class:`'&'` operators. - -The :class:`ParseResults` object returned from -:class:`ParserElement.parseString` can be -accessed as a nested list, a dictionary, or an object with named -attributes. 
- -The pyparsing module handles some of the problems that are typically -vexing when writing text parsers: - - - extra or missing whitespace (the above program will also handle - "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments - - -Getting Started - ------------------ -Visit the classes :class:`ParserElement` and :class:`ParseResults` to -see the base classes that most other pyparsing -classes inherit from. Use the docstrings for examples of how to: - - - construct literal match expressions from :class:`Literal` and - :class:`CaselessLiteral` classes - - construct character word-group expressions using the :class:`Word` - class - - see how to create repetitive expressions using :class:`ZeroOrMore` - and :class:`OneOrMore` classes - - use :class:`'+'`, :class:`'|'`, :class:`'^'`, - and :class:`'&'` operators to combine simple expressions into - more complex ones - - associate names with your parsed results using - :class:`ParserElement.setResultsName` - - access the parsed data, which is returned as a :class:`ParseResults` - object - - find some helpful expression short-cuts like :class:`delimitedList` - and :class:`oneOf` - - find more useful common expressions in the :class:`pyparsing_common` - namespace class -""" -from typing import NamedTuple - - -class version_info(NamedTuple): - major: int - minor: int - micro: int - releaselevel: str - serial: int - - @property - def __version__(self): - return ( - "{}.{}.{}".format(self.major, self.minor, self.micro) - + ( - "{}{}{}".format( - "r" if self.releaselevel[0] == "c" else "", - self.releaselevel[0], - self.serial, - ), - "", - )[self.releaselevel == "final"] - ) - - def __str__(self): - return "{} {} / {}".format(__name__, self.__version__, __version_time__) - - def __repr__(self): - return "{}.{}({})".format( - __name__, - type(self).__name__, - ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)), - ) - - -__version_info__ = version_info(3, 0, 9, "final", 0) -__version_time__ = "05 May 2022 07:02 UTC" -__version__ = __version_info__.__version__ -__versionTime__ = __version_time__ -__author__ = "Paul McGuire " - -from .util import * -from .exceptions import * -from .actions import * -from .core import __diag__, __compat__ -from .results import * -from .core import * -from .core import _builtin_exprs as core_builtin_exprs -from .helpers import * -from .helpers import _builtin_exprs as helper_builtin_exprs - -from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode -from .testing import pyparsing_test as testing -from .common import ( - pyparsing_common as common, - _builtin_exprs as common_builtin_exprs, -) - -# define backward compat synonyms -if "pyparsing_unicode" not in globals(): - pyparsing_unicode = unicode -if "pyparsing_common" not in globals(): - pyparsing_common = common -if "pyparsing_test" not in globals(): - pyparsing_test = testing - -core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs - - -__all__ = [ - "__version__", - "__version_time__", - "__author__", - "__compat__", - "__diag__", - "And", - "AtLineStart", - "AtStringStart", - "CaselessKeyword", - "CaselessLiteral", - "CharsNotIn", - "Combine", - "Dict", - "Each", - "Empty", - "FollowedBy", - "Forward", - "GoToColumn", - "Group", - "IndentedBlock", - "Keyword", - "LineEnd", - "LineStart", - "Literal", - "Located", - "PrecededBy", - "MatchFirst", - "NoMatch", - "NotAny", - "OneOrMore", - "OnlyOnce", - "OpAssoc", - "Opt", - "Optional", - "Or", - "ParseBaseException", - 
"ParseElementEnhance", - "ParseException", - "ParseExpression", - "ParseFatalException", - "ParseResults", - "ParseSyntaxException", - "ParserElement", - "PositionToken", - "QuotedString", - "RecursiveGrammarException", - "Regex", - "SkipTo", - "StringEnd", - "StringStart", - "Suppress", - "Token", - "TokenConverter", - "White", - "Word", - "WordEnd", - "WordStart", - "ZeroOrMore", - "Char", - "alphanums", - "alphas", - "alphas8bit", - "any_close_tag", - "any_open_tag", - "c_style_comment", - "col", - "common_html_entity", - "counted_array", - "cpp_style_comment", - "dbl_quoted_string", - "dbl_slash_comment", - "delimited_list", - "dict_of", - "empty", - "hexnums", - "html_comment", - "identchars", - "identbodychars", - "java_style_comment", - "line", - "line_end", - "line_start", - "lineno", - "make_html_tags", - "make_xml_tags", - "match_only_at_col", - "match_previous_expr", - "match_previous_literal", - "nested_expr", - "null_debug_action", - "nums", - "one_of", - "printables", - "punc8bit", - "python_style_comment", - "quoted_string", - "remove_quotes", - "replace_with", - "replace_html_entity", - "rest_of_line", - "sgl_quoted_string", - "srange", - "string_end", - "string_start", - "trace_parse_action", - "unicode_string", - "with_attribute", - "indentedBlock", - "original_text_for", - "ungroup", - "infix_notation", - "locatedExpr", - "with_class", - "CloseMatch", - "token_map", - "pyparsing_common", - "pyparsing_unicode", - "unicode_set", - "condition_as_parse_action", - "pyparsing_test", - # pre-PEP8 compatibility names - "__versionTime__", - "anyCloseTag", - "anyOpenTag", - "cStyleComment", - "commonHTMLEntity", - "countedArray", - "cppStyleComment", - "dblQuotedString", - "dblSlashComment", - "delimitedList", - "dictOf", - "htmlComment", - "javaStyleComment", - "lineEnd", - "lineStart", - "makeHTMLTags", - "makeXMLTags", - "matchOnlyAtCol", - "matchPreviousExpr", - "matchPreviousLiteral", - "nestedExpr", - "nullDebugAction", - "oneOf", - "opAssoc", - "pythonStyleComment", - "quotedString", - "removeQuotes", - "replaceHTMLEntity", - "replaceWith", - "restOfLine", - "sglQuotedString", - "stringEnd", - "stringStart", - "traceParseAction", - "unicodeString", - "withAttribute", - "indentedBlock", - "originalTextFor", - "infixNotation", - "locatedExpr", - "withClass", - "tokenMap", - "conditionAsParseAction", - "autoname_elements", -] diff --git a/lib/pkg_resources/_vendor/pyparsing/actions.py b/lib/pkg_resources/_vendor/pyparsing/actions.py deleted file mode 100644 index f72c66e7..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/actions.py +++ /dev/null @@ -1,207 +0,0 @@ -# actions.py - -from .exceptions import ParseException -from .util import col - - -class OnlyOnce: - """ - Wrapper for parse actions, to ensure they are only called once. - """ - - def __init__(self, method_call): - from .core import _trim_arity - - self.callable = _trim_arity(method_call) - self.called = False - - def __call__(self, s, l, t): - if not self.called: - results = self.callable(s, l, t) - self.called = True - return results - raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") - - def reset(self): - """ - Allow the associated parse action to be called once more. - """ - - self.called = False - - -def match_only_at_col(n): - """ - Helper method for defining parse actions that require matching at - a specific column in the input text. 
- """ - - def verify_col(strg, locn, toks): - if col(locn, strg) != n: - raise ParseException(strg, locn, "matched token not at column {}".format(n)) - - return verify_col - - -def replace_with(repl_str): - """ - Helper method for common parse actions that simply return - a literal value. Especially useful when used with - :class:`transform_string` (). - - Example:: - - num = Word(nums).set_parse_action(lambda toks: int(toks[0])) - na = one_of("N/A NA").set_parse_action(replace_with(math.nan)) - term = na | num - - term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234] - """ - return lambda s, l, t: [repl_str] - - -def remove_quotes(s, l, t): - """ - Helper parse action for removing quotation marks from parsed - quoted strings. - - Example:: - - # by default, quotation marks are included in parsed results - quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] - - # use remove_quotes to strip quotation marks from parsed results - quoted_string.set_parse_action(remove_quotes) - quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] - """ - return t[0][1:-1] - - -def with_attribute(*args, **attr_dict): - """ - Helper to create a validating parse action to be used with start - tags created with :class:`make_xml_tags` or - :class:`make_html_tags`. Use ``with_attribute`` to qualify - a starting tag with a required attribute value, to avoid false - matches on common tags such as ```` or ``
``. - - Call ``with_attribute`` with a series of attribute names and - values. Specify the list of filter attributes names and values as: - - - keyword arguments, as in ``(align="right")``, or - - as an explicit dict with ``**`` operator, when an attribute - name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` - - For attribute names with a namespace prefix, you must use the second - form. Attribute names are matched insensitive to upper/lower case. - - If just testing for ``class`` (with or without a namespace), use - :class:`with_class`. - - To verify that the attribute exists, but without specifying a value, - pass ``with_attribute.ANY_VALUE`` as the value. - - Example:: - - html = ''' -
<div> - Some text - <div type="grid">1 4 0 1 0</div> - <div type="graph">1,3 2,3 1,1</div> - <div>this has no type</div> - </div>
- - ''' - div,div_end = make_html_tags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().set_parse_action(with_attribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attr_dict.items() - attrs = [(k, v) for k, v in attrs] - - def pa(s, l, tokens): - for attrName, attrValue in attrs: - if attrName not in tokens: - raise ParseException(s, l, "no matching attribute " + attrName) - if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException( - s, - l, - "attribute {!r} has value {!r}, must be {!r}".format( - attrName, tokens[attrName], attrValue - ), - ) - - return pa - - -with_attribute.ANY_VALUE = object() - - -def with_class(classname, namespace=""): - """ - Simplified version of :class:`with_attribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. - - Example:: - - html = ''' -
<div> - Some text - <div class="grid">1 4 0 1 0</div> - <div class="graph">1,3 2,3 1,1</div> - <div>this <div> has no class</div> - </div>
- - ''' - div,div_end = make_html_tags("div") - div_grid = div().set_parse_action(with_class("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "{}:class".format(namespace) if namespace else "class" - return with_attribute(**{classattr: classname}) - - -# pre-PEP8 compatibility symbols -replaceWith = replace_with -removeQuotes = remove_quotes -withAttribute = with_attribute -withClass = with_class -matchOnlyAtCol = match_only_at_col diff --git a/lib/pkg_resources/_vendor/pyparsing/common.py b/lib/pkg_resources/_vendor/pyparsing/common.py deleted file mode 100644 index 1859fb79..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/common.py +++ /dev/null @@ -1,424 +0,0 @@ -# common.py -from .core import * -from .helpers import delimited_list, any_open_tag, any_close_tag -from datetime import datetime - - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - :class:`url` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convert_to_integer = token_map(int) - """ - Parse action for converting parsed integers to Python int - """ - - convert_to_float = token_map(float) - """ - Parse action for converting 
parsed numbers to Python float - """ - - integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = ( - Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) - ) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = ( - Regex(r"[+-]?\d+") - .set_name("signed integer") - .set_parse_action(convert_to_integer) - ) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = ( - signed_integer().set_parse_action(convert_to_float) - + "/" - + signed_integer().set_parse_action(convert_to_float) - ).set_name("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) - - mixed_integer = ( - fraction | signed_integer + Opt(Opt("-").suppress() + fraction) - ).set_name("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.add_parse_action(sum) - - real = ( - Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") - .set_name("real number") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number and returns a float""" - - sci_real = ( - Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") - .set_name("real number with scientific notation") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).setName("number").streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = ( - Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") - .set_name("fnumber") - .set_parse_action(convert_to_float) - ) - """any int or real number, returned as float""" - - identifier = Word(identchars, identbodychars).set_name("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex( - r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" - ).set_name("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") - _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( - "full IPv6 address" - ) - _short_ipv6_address = ( - Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - + "::" - + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - ).set_name("short IPv6 address") - _short_ipv6_address.add_condition( - lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 - ) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") - ipv6_address = Combine( - (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( - "IPv6 address" - ) - ).set_name("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex( - r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" - ).set_name("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convert_to_date(fmt: str = "%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - - def cvt_fn(ss, ll, tt): - try: - return datetime.strptime(tt[0], fmt).date() - except ValueError as ve: - raise ParseException(ss, ll, str(ve)) - - return cvt_fn - - @staticmethod - def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - - return cvt_fn - - iso8601_date = Regex( - r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" - ).set_name("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex( - r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" - ).set_name("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() - - @staticmethod - def strip_html_tags(s: str, l: int, tokens: ParseResults): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the pyparsing wiki page' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transform_string(tokens[0]) - - _commasepitem = ( - Combine( - OneOrMore( - ~Literal(",") - + ~LineEnd() - + Word(printables, exclude_chars=",") - + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) - ) - ) - .streamline() - .set_name("commaItem") - ) - comma_separated_list = delimited_list( - Opt(quoted_string.copy() | _commasepitem, default="") - ).set_name("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcase_tokens = staticmethod(token_map(lambda t: t.upper())) - """Parse action to convert tokens to upper case.""" - - downcase_tokens = staticmethod(token_map(lambda t: t.lower())) - """Parse action to convert tokens to lower case.""" - - # fmt: off - url = Regex( - # https://mathiasbynens.be/demo/url-regex - # https://gist.github.com/dperini/729294 - r"^" + - # protocol identifier (optional) - # short syntax // still required - r"(?:(?:(?Phttps?|ftp):)?\/\/)" + - # user:pass BasicAuth (optional) - 
r"(?:(?P\S+(?::\S*)?)@)?" + - r"(?P" + - # IP address exclusion - # private & local networks - r"(?!(?:10|127)(?:\.\d{1,3}){3})" + - r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + - r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + - # IP address dotted notation octets - # excludes loopback network 0.0.0.0 - # excludes reserved space >= 224.0.0.0 - # excludes network & broadcast addresses - # (first & last IP address of each class) - r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + - r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + - r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + - r"|" + - # host & domain names, may end with dot - # can be replaced by a shortest alternative - # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ - r"(?:" + - r"(?:" + - r"[a-z0-9\u00a1-\uffff]" + - r"[a-z0-9\u00a1-\uffff_-]{0,62}" + - r")?" + - r"[a-z0-9\u00a1-\uffff]\." + - r")+" + - # TLD identifier name, may end with dot - r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + - r")" + - # port number (optional) - r"(:(?P\d{2,5}))?" + - # resource path (optional) - r"(?P\/[^?# ]*)?" + - # query string (optional) - r"(\?(?P[^#]*))?" + - # fragment (optional) - r"(#(?P\S*))?" + - r"$" - ).set_name("url") - # fmt: on - - # pre-PEP8 compatibility names - convertToInteger = convert_to_integer - convertToFloat = convert_to_float - convertToDate = convert_to_date - convertToDatetime = convert_to_datetime - stripHTMLTags = strip_html_tags - upcaseTokens = upcase_tokens - downcaseTokens = downcase_tokens - - -_builtin_exprs = [ - v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) -] diff --git a/lib/pkg_resources/_vendor/pyparsing/core.py b/lib/pkg_resources/_vendor/pyparsing/core.py deleted file mode 100644 index 9acba3f3..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/core.py +++ /dev/null @@ -1,5814 +0,0 @@ -# -# core.py -# -import os -import typing -from typing import ( - NamedTuple, - Union, - Callable, - Any, - Generator, - Tuple, - List, - TextIO, - Set, - Sequence, -) -from abc import ABC, abstractmethod -from enum import Enum -import string -import copy -import warnings -import re -import sys -from collections.abc import Iterable -import traceback -import types -from operator import itemgetter -from functools import wraps -from threading import RLock -from pathlib import Path - -from .util import ( - _FifoCache, - _UnboundedCache, - __config_flags, - _collapse_string_to_ranges, - _escape_regex_range_chars, - _bslash, - _flatten, - LRUMemo as _LRUMemo, - UnboundedMemo as _UnboundedMemo, -) -from .exceptions import * -from .actions import * -from .results import ParseResults, _ParseResultsWithOffset -from .unicode import pyparsing_unicode - -_MAX_INT = sys.maxsize -str_type: Tuple[type, ...] = (str, bytes) - -# -# Copyright (c) 2003-2022 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - - -if sys.version_info >= (3, 8): - from functools import cached_property -else: - - class cached_property: - def __init__(self, func): - self._func = func - - def __get__(self, instance, owner=None): - ret = instance.__dict__[self._func.__name__] = self._func(instance) - return ret - - -class __compat__(__config_flags): - """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. - - - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping - of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; - maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 - behavior - """ - - _type_desc = "compatibility" - - collect_all_And_tokens = True - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _fixed_names = """ - collect_all_And_tokens - """.split() - - -class __diag__(__config_flags): - _type_desc = "diagnostic" - - warn_multiple_tokens_in_named_alternation = False - warn_ungrouped_named_tokens_in_collection = False - warn_name_set_on_empty_Forward = False - warn_on_parse_using_empty_Forward = False - warn_on_assignment_to_Forward = False - warn_on_multiple_string_args_to_oneof = False - warn_on_match_first_with_lshift_operator = False - enable_debug_on_named_expressions = False - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _warning_names = [name for name in _all_names if name.startswith("warn")] - _debug_names = [name for name in _all_names if name.startswith("enable_debug")] - - @classmethod - def enable_all_warnings(cls) -> None: - for name in cls._warning_names: - cls.enable(name) - - -class Diagnostics(Enum): - """ - Diagnostic configuration (all default to disabled) - - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results - name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions - - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results - name is defined on a containing expression with ungrouped subexpressions that also - have results names - - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined - with a results name, but has no contents defined - - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is - defined in a grammar but has never had an expression attached to it - - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined - but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` - - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is - incorrectly called with multiple str arguments - - ``enable_debug_on_named_expressions`` - flag to auto-enable debug 
on all subsequent - calls to :class:`ParserElement.set_name` - - Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. - All warnings can be enabled by calling :class:`enable_all_warnings`. - """ - - warn_multiple_tokens_in_named_alternation = 0 - warn_ungrouped_named_tokens_in_collection = 1 - warn_name_set_on_empty_Forward = 2 - warn_on_parse_using_empty_Forward = 3 - warn_on_assignment_to_Forward = 4 - warn_on_multiple_string_args_to_oneof = 5 - warn_on_match_first_with_lshift_operator = 6 - enable_debug_on_named_expressions = 7 - - -def enable_diag(diag_enum: Diagnostics) -> None: - """ - Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.enable(diag_enum.name) - - -def disable_diag(diag_enum: Diagnostics) -> None: - """ - Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.disable(diag_enum.name) - - -def enable_all_warnings() -> None: - """ - Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). - """ - __diag__.enable_all_warnings() - - -# hide abstract class -del __config_flags - - -def _should_enable_warnings( - cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] -) -> bool: - enable = bool(warn_env_var) - for warn_opt in cmd_line_warn_options: - w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( - ":" - )[:5] - if not w_action.lower().startswith("i") and ( - not (w_message or w_category or w_module) or w_module == "pyparsing" - ): - enable = True - elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): - enable = False - return enable - - -if _should_enable_warnings( - sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") -): - enable_all_warnings() - - -# build list of single arg builtins, that can be used as parse actions -_single_arg_builtins = { - sum, - len, - sorted, - reversed, - list, - tuple, - set, - any, - all, - min, - max, -} - -_generatorType = types.GeneratorType -ParseAction = Union[ - Callable[[], Any], - Callable[[ParseResults], Any], - Callable[[int, ParseResults], Any], - Callable[[str, int, ParseResults], Any], -] -ParseCondition = Union[ - Callable[[], bool], - Callable[[ParseResults], bool], - Callable[[int, ParseResults], bool], - Callable[[str, int, ParseResults], bool], -] -ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] -DebugStartAction = Callable[[str, int, "ParserElement", bool], None] -DebugSuccessAction = Callable[ - [str, int, int, "ParserElement", ParseResults, bool], None -] -DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] - - -alphas = string.ascii_uppercase + string.ascii_lowercase -identchars = pyparsing_unicode.Latin1.identchars -identbodychars = pyparsing_unicode.Latin1.identbodychars -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -printables = "".join([c for c in string.printable if c not in string.whitespace]) - -_trim_arity_call_line: traceback.StackSummary = None - - -def _trim_arity(func, max_limit=3): - """decorator to trim function calls to match the arity of the target""" - global _trim_arity_call_line - - if func in _single_arg_builtins: - return lambda s, l, t: func(t) - - limit = 0 - found_arity = False - - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [frame_summary[:2]] - - # synthesize what would be returned by traceback.extract_stack at the call to - # 
user's parse action 'func', so that we don't incur call penalty at parse time - - # fmt: off - LINE_DIFF = 7 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! - _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]) - pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) - - def wrapper(*args): - nonlocal found_arity, limit - while 1: - try: - ret = func(*args[limit:]) - found_arity = True - return ret - except TypeError as te: - # re-raise TypeErrors if they did not come from our arity testing - if found_arity: - raise - else: - tb = te.__traceback__ - trim_arity_type_error = ( - extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth - ) - del tb - - if trim_arity_type_error: - if limit < max_limit: - limit += 1 - continue - - raise - # fmt: on - - # copy func name to wrapper for sensible debug output - # (can't use functools.wraps, since that messes with function signature) - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - wrapper.__name__ = func_name - wrapper.__doc__ = func.__doc__ - - return wrapper - - -def condition_as_parse_action( - fn: ParseCondition, message: str = None, fatal: bool = False -) -> ParseAction: - """ - Function to convert a simple predicate function that returns ``True`` or ``False`` - into a parse action. Can be used in places when a parse action is required - and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition - to an operator level in :class:`infix_notation`). - - Optional keyword arguments: - - - ``message`` - define a custom message to be used in the raised exception - - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; - otherwise will raise :class:`ParseException` - - """ - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - - -def _default_start_debug_action( - instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False -): - cache_hit_str = "*" if cache_hit else "" - print( - ( - "{}Match {} at loc {}({},{})\n {}\n {}^".format( - cache_hit_str, - expr, - loc, - lineno(loc, instring), - col(loc, instring), - line(loc, instring), - " " * (col(loc, instring) - 1), - ) - ) - ) - - -def _default_success_debug_action( - instring: str, - startloc: int, - endloc: int, - expr: "ParserElement", - toks: ParseResults, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list())) - - -def _default_exception_debug_action( - instring: str, - loc: int, - expr: "ParserElement", - exc: Exception, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print( - "{}Match {} failed, {} raised: {}".format( - cache_hit_str, expr, type(exc).__name__, exc - ) - ) - - -def null_debug_action(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - - -class ParserElement(ABC): - """Abstract base level parser element class.""" - - DEFAULT_WHITE_CHARS: str = " \n\t\r" - verbose_stacktrace: bool = False - _literalStringClass: typing.Optional[type] = None - - @staticmethod - def set_default_whitespace_chars(chars: str) -> None: - r""" - Overrides the default 
whitespace chars - - Example:: - - # default whitespace chars are space, and newline - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.set_default_whitespace_chars(" \t") - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - # update whitespace all parse expressions defined in this module - for expr in _builtin_exprs: - if expr.copyDefaultWhiteChars: - expr.whiteChars = set(chars) - - @staticmethod - def inline_literals_using(cls: type) -> None: - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inline_literals_using(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - class DebugActions(NamedTuple): - debug_try: typing.Optional[DebugStartAction] - debug_match: typing.Optional[DebugSuccessAction] - debug_fail: typing.Optional[DebugExceptionAction] - - def __init__(self, savelist: bool = False): - self.parseAction: List[ParseAction] = list() - self.failAction: typing.Optional[ParseFailAction] = None - self.customName = None - self._defaultName = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - # used when checking for left-recursion - self.mayReturnEmpty = False - self.keepTabs = False - self.ignoreExprs: List["ParserElement"] = list() - self.debug = False - self.streamlined = False - # optimize exception handling for subclasses that don't advance parse index - self.mayIndexError = True - self.errmsg = "" - # mark results names as modal (report only last) or cumulative (list all) - self.modalResults = True - # custom debug actions - self.debugActions = self.DebugActions(None, None, None) - # avoid redundant calls to preParse - self.callPreparse = True - self.callDuringTry = False - self.suppress_warnings_: List[Diagnostics] = [] - - def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement": - """ - Suppress warnings emitted for a particular diagnostic on this expression. - - Example:: - - base = pp.Forward() - base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) - - # statement would normally raise a warning, but is now suppressed - print(base.parseString("x")) - - """ - self.suppress_warnings_.append(warning_type) - return self - - def copy(self) -> "ParserElement": - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. 
- - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - return cpy - - def set_results_name( - self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False - ) -> "ParserElement": - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - - Normally, results names are assigned as you would assign keys in a dict: - any existing value is overwritten by later values. If it is necessary to - keep all values captured for a particular results name, call ``set_results_name`` - with ``list_all_matches`` = True. - - NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.set_results_name("name")`` - - see :class:`__call__`. If ``list_all_matches`` is required, use - ``expr("name*")``. - - Example:: - - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - listAllMatches = listAllMatches or list_all_matches - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - if name is None: - return self - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def set_break(self, break_flag: bool = True) -> "ParserElement": - """ - Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to - disable. - """ - if break_flag: - _parseMethod = self._parse - - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Define one or more actions to perform when successfully matching parse element definition. - - Parse actions can be called to perform data conversions, do extra validation, - update external data structures, or enhance or replace the parsed tokens. 
- Each parse action ``fn`` is a callable method with 0-3 arguments, called as - ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object - - The parsed tokens are passed to the parse action as ParseResults. They can be - modified in place using list-style append, extend, and pop operations to update - the parsed list elements; and with dictionary-style item set and del operations - to add, update, or remove any named results. If the tokens are modified in place, - it is not necessary to return them with a return statement. - - Parse actions can also completely replace the given tokens, with another ``ParseResults`` - object, or with some entirely different object (common for parse actions that perform data - conversions). A convenient way to build a new parse result is to define the values - using a dict, and then create the return value using :class:`ParseResults.from_dict`. - - If None is passed as the ``fn`` parse action, all previously added parse actions for this - expression are cleared. - - Optional keyword arguments: - - - call_during_try = (default= ``False``) indicate if parse action should be run during - lookaheads and alternate testing. For parse actions that have side effects, it is - important to only call the parse action once it is determined that it is being - called as part of a successful parse. For parse actions that perform additional - validation, then call_during_try should be passed as True, so that the validation - code is included in the preliminary "try" parses. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`parse_string` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - - Example:: - - # parse dates in the form YYYY/MM/DD - - # use parse action to convert toks from str to int at parse time - def convert_to_int(toks): - return int(toks[0]) - - # use a parse action to verify that the date is a valid date - def is_valid_date(instring, loc, toks): - from datetime import date - year, month, day = toks[::2] - try: - date(year, month, day) - except ValueError: - raise ParseException(instring, loc, "invalid date given") - - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - # add parse actions - integer.set_parse_action(convert_to_int) - date_str.set_parse_action(is_valid_date) - - # note that integer fields are now ints, not strings - date_str.run_tests(''' - # successful parse - note that integer fields were converted to ints - 1999/12/31 - - # fail - invalid date - 1999/13/31 - ''') - """ - if list(fns) == [None]: - self.parseAction = [] - else: - if not all(callable(fn) for fn in fns): - raise TypeError("parse actions must be callable") - self.parseAction = [_trim_arity(fn) for fn in fns] - self.callDuringTry = kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. - - See examples in :class:`copy`. 
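        A minimal supplementary sketch (assuming standard pyparsing 3.x imports; the names are illustrative only)
        of layering an extra action on top of an existing one with ``add_parse_action``::

            import pyparsing as pp

            integer = pp.Word(pp.nums).set_parse_action(lambda toks: int(toks[0]))

            # a copy keeps the int conversion; add_parse_action layers a second step on top
            doubled = integer.copy().add_parse_action(lambda toks: toks[0] * 2)

            print(integer.parse_string("21"))   # -> [21]
            print(doubled.parse_string("21"))   # -> [42]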
- """ - self.parseAction += [_trim_arity(fn) for fn in fns] - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement": - """Add a boolean predicate function to expression's list of parse actions. See - :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, - functions passed to ``add_condition`` need to return boolean success/fail of the condition. - - Optional keyword arguments: - - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise - ParseException - - call_during_try = boolean to indicate if this method should be called during internal tryParse calls, - default=False - - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), - (line:1, col:1) - """ - for fn in fns: - self.parseAction.append( - condition_as_parse_action( - fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False) - ) - ) - - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": - """ - Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - ``fn(s, loc, expr, err)`` where: - - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - - The function returns no value. 
It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables(self, instring, loc): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc, dummy = e._parse(instring, loc) - exprsFound = True - except ParseException: - pass - return loc - - def preParse(self, instring, loc): - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - - if self.skipWhitespace: - instrlen = len(instring) - white_chars = self.whiteChars - while loc < instrlen and instring[loc] in white_chars: - loc += 1 - - return loc - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - def postParse(self, instring, loc, tokenlist): - return tokenlist - - # @profile - def _parseNoCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - TRY, MATCH, FAIL = 0, 1, 2 - debugging = self.debug # and doActions) - len_instring = len(instring) - - if debugging or self.failAction: - # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) - try: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.debugActions.debug_try: - self.debugActions.debug_try(instring, tokens_start, self, False) - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except Exception as err: - # print("Exception raised:", err) - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - if self.failAction: - self.failAction(instring, tokens_start, self, err) - raise - else: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - - tokens = self.postParse(instring, loc, tokens) - - ret_tokens = ParseResults( - tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults - ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - except Exception as err: - # print "Exception raised in user parse action:", err - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - raise - else: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = 
ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - if debugging: - # print("Matched", self, "->", ret_tokens.as_list()) - if self.debugActions.debug_match: - self.debugActions.debug_match( - instring, tokens_start, loc, self, ret_tokens, False - ) - - return loc, ret_tokens - - def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int: - try: - return self._parse(instring, loc, doActions=False)[0] - except ParseFatalException: - if raise_fatal: - raise - raise ParseException(instring, loc, self.errmsg, self) - - def can_parse_next(self, instring: str, loc: int) -> bool: - try: - self.try_parse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - # cache for left-recursion in Forward references - recursion_lock = RLock() - recursion_memos: typing.Dict[ - Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] - ] = {} - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = ( - {} - ) # this is set later by enabled_packrat(); this is here so that reset_cache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - HIT, MISS = 0, 1 - TRY, MATCH, FAIL = 0, 1, 2 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy(), loc)) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if self.debug and self.debugActions.debug_try: - try: - self.debugActions.debug_try(instring, loc, self, cache_hit=True) - except TypeError: - pass - if isinstance(value, Exception): - if self.debug and self.debugActions.debug_fail: - try: - self.debugActions.debug_fail( - instring, loc, self, value, cache_hit=True - ) - except TypeError: - pass - raise value - - loc_, result, endloc = value[0], value[1].copy(), value[2] - if self.debug and self.debugActions.debug_match: - try: - self.debugActions.debug_match( - instring, loc_, endloc, self, result, cache_hit=True - ) - except TypeError: - pass - - return loc_, result - - _parse = _parseNoCache - - @staticmethod - def reset_cache() -> None: - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len( - ParserElement.packrat_cache_stats - ) - ParserElement.recursion_memos.clear() - - _packratEnabled = False - _left_recursion_enabled = False - - @staticmethod - def disable_memoization() -> None: - """ - Disables active Packrat or Left Recursion parsing and their memoization - - This method also works if neither Packrat nor Left Recursion are enabled. - This makes it safe to call before activating Packrat nor Left Recursion - to clear any previous settings. 
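        A short sketch of the typical call order, assuming pyparsing 3.x; the grammar itself is elided::

            import pyparsing as pp

            pp.ParserElement.enable_packrat()          # turn on packrat memoization
            # ... build and exercise a grammar here ...
            pp.ParserElement.disable_memoization()     # clear caches, back to plain parsing
            pp.ParserElement.enable_left_recursion()   # safe to switch strategies afterwards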
- """ - ParserElement.reset_cache() - ParserElement._left_recursion_enabled = False - ParserElement._packratEnabled = False - ParserElement._parse = ParserElement._parseNoCache - - @staticmethod - def enable_left_recursion( - cache_size_limit: typing.Optional[int] = None, *, force=False - ) -> None: - """ - Enables "bounded recursion" parsing, which allows for both direct and indirect - left-recursion. During parsing, left-recursive :class:`Forward` elements are - repeatedly matched with a fixed recursion depth that is gradually increased - until finding the longest match. - - Example:: - - import pyparsing as pp - pp.ParserElement.enable_left_recursion() - - E = pp.Forward("E") - num = pp.Word(pp.nums) - # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... - E <<= E + '+' - num | num - - print(E.parse_string("1+2+3")) - - Recursion search naturally memoizes matches of ``Forward`` elements and may - thus skip reevaluation of parse actions during backtracking. This may break - programs with parse actions which rely on strict ordering of side-effects. - - Parameters: - - - cache_size_limit - (default=``None``) - memoize at most this many - ``Forward`` elements during matching; if ``None`` (the default), - memoize all ``Forward`` elements. - - Bounded Recursion parsing works similar but not identical to Packrat parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. - """ - if force: - ParserElement.disable_memoization() - elif ParserElement._packratEnabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if cache_size_limit is None: - ParserElement.recursion_memos = _UnboundedMemo() - elif cache_size_limit > 0: - ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) - else: - raise NotImplementedError("Memo size of %s" % cache_size_limit) - ParserElement._left_recursion_enabled = True - - @staticmethod - def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: - """ - Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - cache_size_limit - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enable_packrat`. - For best results, call ``enable_packrat()`` immediately after - importing pyparsing. - - Example:: - - import pyparsing - pyparsing.ParserElement.enable_packrat() - - Packrat parsing works similar but not identical to Bounded Recursion parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. 
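        A supplementary sketch of the cache-size options described above (assuming pyparsing 3.x);
        ``force=True`` lets the second call replace the first setting::

            import pyparsing as pp

            pp.ParserElement.enable_packrat()              # default FIFO cache of 128 entries
            pp.ParserElement.enable_packrat(
                cache_size_limit=None, force=True)         # replace with an unbounded cache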
- """ - if force: - ParserElement.disable_memoization() - elif ParserElement._left_recursion_enabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = _UnboundedCache() - else: - ParserElement.packrat_cache = _FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parse_string( - self, instring: str, parse_all: bool = False, *, parseAll: bool = False - ) -> ParseResults: - """ - Parse a string with respect to the parser definition. This function is intended as the primary interface to the - client code. - - :param instring: The input string to be parsed. - :param parse_all: If set, the entire input string must match the grammar. - :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. - :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. - :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or - an object with attributes if the given parser includes results names. - - If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This - is also equivalent to ending the grammar with :class:`StringEnd`(). - - To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are - converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string - contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string - being parsed, one can ensure a consistent view of the input string by doing one of the following: - - - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), - - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the - parse action's ``s`` argument, or - - explicitly expand the tabs in your input string before calling ``parse_string``. - - Examples: - - By default, partial matches are OK. - - >>> res = Word('a').parse_string('aaaaabaaa') - >>> print(res) - ['aaaaa'] - - The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children - directly to see more examples. - - It raises an exception if parse_all flag is set and instring does not match the whole grammar. - - >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) - Traceback (most recent call last): - ... 
- pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) - """ - parseAll = parse_all or parseAll - - ParserElement.reset_cache() - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse(instring, 0) - if parseAll: - loc = self.preParse(instring, loc) - se = Empty() + StringEnd() - se._parse(instring, loc) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - raise exc.with_traceback(None) - else: - return tokens - - def scan_string( - self, - instring: str, - max_matches: int = _MAX_INT, - overlap: bool = False, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> Generator[Tuple[ParseResults, int, int], None, None]: - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - ``max_matches`` argument, to clip scanning after 'n' matches are found. If - ``overlap`` is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See :class:`parse_string` for more information on parsing - strings with embedded tabs. - - Example:: - - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens, start, end in Word(alphas).scan_string(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - maxMatches = min(maxMatches, max_matches) - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = str(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn(instring, loc) - nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) - except ParseException: - loc = preloc + 1 - else: - if nextLoc > loc: - matches += 1 - if debug: - print( - { - "tokens": tokens.asList(), - "start": preloc, - "end": nextLoc, - } - ) - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn(instring, loc) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc + 1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def transform_string(self, instring: str, *, debug: bool = False) -> str: - """ - Extension to :class:`scan_string`, to modify matching text with modified tokens that may - be returned from a parse action. To use ``transform_string``, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking ``transform_string()`` on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. ``transform_string()`` returns the resulting transformed string. 
- - Example:: - - wd = Word(alphas) - wd.set_parse_action(lambda toks: toks[0].title()) - - print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) - - prints:: - - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out: List[str] = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transform_string and scan_string - self.keepTabs = True - try: - for t, s, e in self.scan_string(instring, debug=debug): - out.append(instring[lastE:s]) - if t: - if isinstance(t, ParseResults): - out += t.as_list() - elif isinstance(t, Iterable) and not isinstance(t, str_type): - out.extend(t) - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join([str(s) for s in _flatten(out)]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def search_string( - self, - instring: str, - max_matches: int = _MAX_INT, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> ParseResults: - """ - Another extension to :class:`scan_string`, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - ``max_matches`` argument, to clip searching after 'n' matches are found. - - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - maxMatches = min(maxMatches, max_matches) - try: - return ParseResults( - [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] - ) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def split( - self, - instring: str, - maxsplit: int = _MAX_INT, - include_separators: bool = False, - *, - includeSeparators=False, - ) -> Generator[str, None, None]: - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``include_separators`` argument (default= ``False``), if the separating - matching text should be included in the split results. - - Example:: - - punc = one_of(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - includeSeparators = includeSeparators or include_separators - last = 0 - for t, s, e in self.scan_string(instring, max_matches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` - converts them to :class:`Literal`s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print(hello, "->", greet.parse_string(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - - ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. - - Literal('start') + ... + Literal('end') - - is equivalent to: - - Literal('start') + SkipTo('end')("_skipped*") + Literal('end') - - Note that the skipped text is returned with '_skipped' as a results name, - and to support having multiple skips in the same parser, the value returned is - a list of all skipped text. - """ - if other is Ellipsis: - return _PendingSkip(self) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return And([self, other]) - - def __radd__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator when left operand is not a :class:`ParserElement` - """ - if other is Ellipsis: - return SkipTo(self)("_skipped*") + self - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other + self - - def __sub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator, returns :class:`And` with error stop - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return self + And._ErrorStop() + other - - def __rsub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other - self - - def __mul__(self, other) -> "ParserElement": - """ - Implementation of ``*`` operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer - tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples - may also include ``None`` as in: - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. 
If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0,) + other[1:] + (None,))[:2] - - if isinstance(other, int): - minElements, optElements = other, 0 - elif isinstance(other, tuple): - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError( - "cannot multiply ParserElement and ({}) objects".format( - ",".join(type(item).__name__ for item in other) - ) - ) - else: - raise TypeError( - "cannot multiply ParserElement and {} objects".format( - type(other).__name__ - ) - ) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError( - "second tuple value must be greater or equal to first tuple value" - ) - if minElements == optElements == 0: - return And([]) - - if optElements: - - def makeOptionalList(n): - if n > 1: - return Opt(self + makeOptionalList(n - 1)) - else: - return Opt(self) - - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other) -> "ParserElement": - return self.__mul__(other) - - def __or__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return MatchFirst([self, other]) - - def __ror__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other | self - - def __xor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator - returns :class:`Or` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Or([self, other]) - - def __rxor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other ^ self - - def __and__(self, other) -> "ParserElement": - """ - 
Implementation of ``&`` operator - returns :class:`Each` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Each([self, other]) - - def __rand__(self, other) -> "ParserElement": - """ - Implementation of ``&`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other & self - - def __invert__(self) -> "ParserElement": - """ - Implementation of ``~`` operator - returns :class:`NotAny` - """ - return NotAny(self) - - # disable __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - __iter__ = None - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception - if more than ``n`` ``expr``s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - """ - - # convert single arg keys to tuples - try: - if isinstance(key, str_type): - key = (key,) - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - raise TypeError( - "only 1 or 2 index arguments supported ({}{})".format( - key[:5], "... [{}]".format(len(key)) if len(key) > 5 else "" - ) - ) - - # clip to 2 elements - ret = self * tuple(key[:2]) - return ret - - def __call__(self, name: str = None) -> "ParserElement": - """ - Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be - passed as ``True``. - - If ``name` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - else: - return self.copy() - - def suppress(self) -> "ParserElement": - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress(self) - - def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Enables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. 
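        A minimal sketch (assuming pyparsing 3.x) contrasting the default skipping with ``leave_whitespace``
        and restoring it with ``ignore_whitespace``::

            import pyparsing as pp

            word = pp.Word(pp.alphas)
            print(word.parse_string("   hello"))        # -> ['hello'], leading spaces skipped

            strict = pp.Word(pp.alphas).leave_whitespace()
            # strict.parse_string("   hello")           # would raise ParseException at col 1

            strict.ignore_whitespace()                  # restore the default skipping
            print(strict.parse_string("   hello"))      # -> ['hello']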
- - :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = True - return self - - def leave_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Disables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - - :param recursive: If true (the default), also disable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = False - return self - - def set_whitespace_chars( - self, chars: Union[Set[str], str], copy_defaults: bool = False - ) -> "ParserElement": - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = set(chars) - self.copyDefaultWhiteChars = copy_defaults - return self - - def parse_with_tabs(self) -> "ParserElement": - """ - Overrides default behavior to expand ```` s to spaces before parsing the input string. - Must be called before ``parse_string`` when the input grammar contains elements that - match ```` characters. - """ - self.keepTabs = True - return self - - def ignore(self, other: "ParserElement") -> "ParserElement": - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - - patt = Word(alphas)[1, ...] - patt.parse_string('ablaj /* comment */ lskjd') - # -> ['ablaj'] - - patt.ignore(c_style_comment) - patt.parse_string('ablaj /* comment */ lskjd') - # -> ['ablaj', 'lskjd'] - """ - import typing - - if isinstance(other, str_type): - other = Suppress(other) - - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append(Suppress(other.copy())) - return self - - def set_debug_actions( - self, - start_action: DebugStartAction, - success_action: DebugSuccessAction, - exception_action: DebugExceptionAction, - ) -> "ParserElement": - """ - Customize display of debugging messages while doing pattern matching: - - - ``start_action`` - method to be called when an expression is about to be parsed; - should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)`` - - - ``success_action`` - method to be called when an expression has successfully parsed; - should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserELement, parsed_tokens: ParseResults, cache_hit: bool)`` - - - ``exception_action`` - method to be called when expression fails to parse; - should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)`` - """ - self.debugActions = self.DebugActions( - start_action or _default_start_debug_action, - success_action or _default_success_debug_action, - exception_action or _default_exception_debug_action, - ) - self.debug = True - return self - - def set_debug(self, flag: bool = True) -> "ParserElement": - """ - Enable display of debugging messages while doing pattern matching. - Set ``flag`` to ``True`` to enable, ``False`` to disable. 
- - Example:: - - wd = Word(alphas).set_name("alphaword") - integer = Word(nums).set_name("numword") - term = wd | integer - - # turn on debugging for wd - wd.set_debug() - - term[1, ...].parse_string("abc 123 xyz 890") - - prints:: - - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using :class:`set_debug_actions`. Prior to attempting - to match the ``wd`` expression, the debugging message ``"Match at loc (,)"`` - is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` - message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``. - """ - if flag: - self.set_debug_actions( - _default_start_debug_action, - _default_success_debug_action, - _default_exception_debug_action, - ) - else: - self.debug = False - return self - - @property - def default_name(self) -> str: - if self._defaultName is None: - self._defaultName = self._generateDefaultName() - return self._defaultName - - @abstractmethod - def _generateDefaultName(self): - """ - Child classes must define this method, which defines how the ``default_name`` is set. - """ - - def set_name(self, name: str) -> "ParserElement": - """ - Define name for this expression, makes debugging and exception messages clearer. - Example:: - Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) - Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.customName = name - self.errmsg = "Expected " + self.name - if __diag__.enable_debug_on_named_expressions: - self.set_debug() - return self - - @property - def name(self) -> str: - # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name - return self.customName if self.customName is not None else self.default_name - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return str(self) - - def streamline(self) -> "ParserElement": - self.streamlined = True - self._defaultName = None - return self - - def recurse(self) -> Sequence["ParserElement"]: - return [] - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.recurse(): - e._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self._checkRecursion([]) - - def parse_file( - self, - file_or_filename: Union[str, Path, TextIO], - encoding: str = "utf-8", - parse_all: bool = False, - *, - parseAll: bool = False, - ) -> ParseResults: - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. 
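        A supplementary sketch of parsing a file by name (assuming pyparsing 3.x; ``"numbers.txt"`` is a
        placeholder path, e.g. a file containing ``1, 2, 3``)::

            import pyparsing as pp

            integer = pp.Word(pp.nums)
            numbers = pp.delimited_list(integer)

            result = numbers.parse_file("numbers.txt", parse_all=True)
            print(result.as_list())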
- """ - parseAll = parseAll or parse_all - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r", encoding=encoding) as f: - file_contents = f.read() - try: - return self.parse_string(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, str_type): - return self.matches(other, parse_all=True) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __hash__(self): - return id(self) - - def matches( - self, test_string: str, parse_all: bool = True, *, parseAll: bool = True - ) -> bool: - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - ``test_string`` - to test against this expression for a match - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - parseAll = parseAll and parse_all - try: - self.parse_string(str(test_string), parse_all=parseAll) - return True - except ParseBaseException: - return False - - def run_tests( - self, - tests: Union[str, List[str]], - parse_all: bool = True, - comment: typing.Optional[Union["ParserElement", str]] = "#", - full_dump: bool = True, - print_results: bool = True, - failure_tests: bool = False, - post_parse: Callable[[str, ParseResults], str] = None, - file: typing.Optional[TextIO] = None, - with_line_numbers: bool = False, - *, - parseAll: bool = True, - fullDump: bool = True, - printResults: bool = True, - failureTests: bool = False, - postParse: Callable[[str, ParseResults], str] = None, - ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]: - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - ``tests`` - a list of separate test strings, or a multiline string of test strings - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - ``print_results`` - (default= ``True``) prints test output to stdout - - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing - - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - ``file`` - (default= ``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - ``with_line_numbers`` - default= ``False``) show test strings with line and column numbers - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failure_tests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.run_tests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.run_tests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failure_tests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading ``'r'``.) 
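        A small sketch of using the returned ``(success, results)`` tuple programmatically instead of
        printing the report (assuming pyparsing 3.x)::

            import pyparsing as pp

            number = pp.pyparsing_common.number

            success, report = number.run_tests("""
                100
                6.02e23
                """, print_results=False)

            if not success:
                for test_string, outcome in report:
                    print("FAILED:", test_string, outcome)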
- """ - from .testing import pyparsing_test - - parseAll = parseAll and parse_all - fullDump = fullDump and full_dump - printResults = printResults and print_results - failureTests = failureTests or failure_tests - postParse = postParse or post_parse - if isinstance(tests, str_type): - line_strip = type(tests).strip - tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] - if isinstance(comment, str_type): - comment = Literal(comment) - if file is None: - file = sys.stdout - print_ = file.write - - result: Union[ParseResults, Exception] - allResults = [] - comments = [] - success = True - NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) - BOM = "\ufeff" - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append( - pyparsing_test.with_line_numbers(t) if with_line_numbers else t - ) - continue - if not t: - continue - out = [ - "\n" + "\n".join(comments) if comments else "", - pyparsing_test.with_line_numbers(t) if with_line_numbers else t, - ] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transform_string(t.lstrip(BOM)) - result = self.parse_string(t, parse_all=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - out.append(pe.explain()) - out.append("FAIL: " + str(pe)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(pe.__traceback__)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(exc.__traceback__)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append( - "{} failed: {}: {}".format( - postParse.__name__, type(e).__name__, e - ) - ) - else: - out.append(result.dump(full=fullDump)) - out.append("") - - if printResults: - print_("\n".join(out)) - - allResults.append((t, result)) - - return success, allResults - - def create_diagram( - self, - output_html: Union[TextIO, Path, str], - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, - **kwargs, - ) -> None: - """ - Create a railroad diagram for the parser. - - Parameters: - - output_html (str or file-like object) - output target for generated - diagram HTML - - vertical (int) - threshold for formatting multiple alternatives vertically - instead of horizontally (default=3) - - show_results_names - bool flag whether diagram should show annotations for - defined results names - - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box - Additional diagram-formatting keyword arguments can also be included; - see railroad.Diagram class. 
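        A minimal sketch of generating a diagram (assuming pyparsing 3.x with the optional
        ``pip install pyparsing[diagrams]`` extra installed; the output filename is illustrative)::

            import pyparsing as pp

            integer = pp.Word(pp.nums)
            date_expr = integer("year") + "/" + integer("month") + "/" + integer("day")

            date_expr.create_diagram("date_parser.html", show_results_names=True)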
- """ - - try: - from .diagram import to_railroad, railroad_to_html - except ImportError as ie: - raise Exception( - "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" - ) from ie - - self.streamline() - - railroad = to_railroad( - self, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - diagram_kwargs=kwargs, - ) - if isinstance(output_html, (str, Path)): - with open(output_html, "w", encoding="utf-8") as diag_file: - diag_file.write(railroad_to_html(railroad)) - else: - # we were passed a file-like object, just write to it - output_html.write(railroad_to_html(railroad)) - - setDefaultWhitespaceChars = set_default_whitespace_chars - inlineLiteralsUsing = inline_literals_using - setResultsName = set_results_name - setBreak = set_break - setParseAction = set_parse_action - addParseAction = add_parse_action - addCondition = add_condition - setFailAction = set_fail_action - tryParse = try_parse - canParseNext = can_parse_next - resetCache = reset_cache - enableLeftRecursion = enable_left_recursion - enablePackrat = enable_packrat - parseString = parse_string - scanString = scan_string - searchString = search_string - transformString = transform_string - setWhitespaceChars = set_whitespace_chars - parseWithTabs = parse_with_tabs - setDebugActions = set_debug_actions - setDebug = set_debug - defaultName = default_name - setName = set_name - parseFile = parse_file - runTests = run_tests - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class _PendingSkip(ParserElement): - # internal placeholder class to hold a place were '...' is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr: ParserElement, must_skip: bool = False): - super().__init__() - self.anchor = expr - self.must_skip = must_skip - - def _generateDefaultName(self): - return str(self.anchor + Empty()).replace("Empty", "...") - - def __add__(self, other) -> "ParserElement": - skipper = SkipTo(other).set_name("...")("_skipped*") - if self.must_skip: - - def must_skip(t): - if not t._skipped or t._skipped.as_list() == [""]: - del t[0] - t.pop("_skipped", None) - - def show_skip(t): - if t._skipped.as_list()[-1:] == [""]: - t.pop("_skipped") - t["_skipped"] = "missing <" + repr(self.anchor) + ">" - - return ( - self.anchor + skipper().add_parse_action(must_skip) - | skipper().add_parse_action(show_skip) - ) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.defaultName - - def parseImpl(self, *args): - raise Exception( - "use of `...` expression without following SkipTo target expression" - ) - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - - def __init__(self): - super().__init__(savelist=False) - - def _generateDefaultName(self): - return type(self).__name__ - - -class Empty(Token): - """ - An empty token, will always match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """ - A token that will never match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """ - Token to exactly match a specified string. 
- - Example:: - - Literal('blah').parse_string('blah') # -> ['blah'] - Literal('blah').parse_string('blahfooblah') # -> ['blah'] - Literal('blah').parse_string('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - super().__init__() - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Literal; use Empty() instead") - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: modify __class__ to select - # a parseImpl optimized for single-character check - if self.matchLen == 1 and type(self) is Literal: - self.__class__ = _SingleCharLiteral - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith( - self.match, loc - ): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -ParserElement._literalStringClass = Literal - - -class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parse_string("start") # -> ['start'] - Keyword("start").parse_string("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
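    A supplementary sketch (assuming pyparsing 3.x) that makes the Literal/Keyword contrast above concrete::

        import pyparsing as pp

        print(pp.Literal("if").parse_string("ifAndOnlyIf"))   # -> ['if'], a prefix match is enough
        print(pp.Keyword("if").parse_string("if(y==2)"))      # -> ['if'], '(' is not a keyword char
        # pp.Keyword("if").parse_string("ifAndOnlyIf")        # raises ParseException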
- """ - - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - caseless: bool = False, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - super().__init__() - identChars = identChars or ident_chars - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Keyword; use Empty() instead") - self.errmsg = "Expected {} {}".format(type(self).__name__, self.name) - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = match_string.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - errmsg = self.errmsg - errloc = loc - if self.caseless: - if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: - if loc == 0 or instring[loc - 1].upper() not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ", was immediately followed by keyword character" - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - else: - if ( - instring[loc] == self.firstMatchChar - and self.matchLen == 1 - or instring.startswith(self.match, loc) - ): - if loc == 0 or instring[loc - 1] not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ( - ", keyword was immediately followed by keyword character" - ) - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - raise ParseException(instring, errloc, errmsg, self) - - @staticmethod - def set_default_keyword_chars(chars) -> None: - """ - Overrides the default characters used by :class:`Keyword` expressions. - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - - setDefaultKeywordChars = set_default_keyword_chars - - -class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - match_string = matchString or match_string - super().__init__(match_string.upper()) - # Preserve the defining literal. 
- self.returnString = match_string - self.errmsg = "Expected " + self.name - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc : loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - identChars = identChars or ident_chars - match_string = matchString or match_string - super().__init__(match_string, identChars, caseless=True) - - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. - :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters - - ``max_mismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. - - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) - patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - - def __init__( - self, - match_string: str, - max_mismatches: int = None, - *, - maxMismatches: int = 1, - caseless=False, - ): - maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches - super().__init__() - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected {!r} (with up to {} mismatches)".format( - self.match_string, self.maxMismatches - ) - self.caseless = caseless - self.mayIndexError = False - self.mayReturnEmpty = False - - def _generateDefaultName(self): - return "{}:{!r}".format(type(self).__name__, self.match_string) - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate( - zip(instring[loc:maxloc], match_string) - ): - src, mat = s_m - if self.caseless: - src, mat = src.lower(), mat.lower() - - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = start + match_stringloc + 1 - 
results = ParseResults([instring[start:loc]]) - results["original"] = match_string - results["mismatches"] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """Token for matching words composed of allowed character sets. - Parameters: - - ``init_chars`` - string of all characters that should be used to - match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; - if ``body_chars`` is also specified, then this is the string of - initial characters - - ``body_chars`` - string of characters that - can be used for matching after a matched initial character as - given in ``init_chars``; if omitted, same as the initial characters - (default=``None``) - - ``min`` - minimum number of characters to match (default=1) - - ``max`` - maximum number of characters to match (default=0) - - ``exact`` - exact number of characters to match (default=0) - - ``as_keyword`` - match as a keyword (default=``False``) - - ``exclude_chars`` - characters that might be - found in the input ``body_chars`` string but which should not be - accepted for matching ;useful to define a word of all - printables except for one or two characters, for instance - (default=``None``) - - :class:`srange` is useful for defining custom character set strings - for defining :class:`Word` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. - This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - :class:`printables` (any non-whitespace character) - - ``alphas``, ``nums``, and ``printables`` are also defined in several - Unicode sets - see :class:`pyparsing_unicode``. 
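    As an illustrative aside, :class:`srange` can build the character set from
    regex-style range notation (a sketch, not exhaustive usage)::

        hex_word = Word(srange("[0-9a-fA-F]"))
        hex_word.parse_string("deadBEEF01")  # -> ['deadBEEF01']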
- - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, exclude_chars=",") - """ - - def __init__( - self, - init_chars: str = "", - body_chars: typing.Optional[str] = None, - min: int = 1, - max: int = 0, - exact: int = 0, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - initChars: typing.Optional[str] = None, - bodyChars: typing.Optional[str] = None, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - initChars = initChars or init_chars - bodyChars = bodyChars or body_chars - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__() - if not initChars: - raise ValueError( - "invalid {}, initChars cannot be empty string".format( - type(self).__name__ - ) - ) - - initChars = set(initChars) - self.initChars = initChars - if excludeChars: - excludeChars = set(excludeChars) - initChars -= excludeChars - if bodyChars: - bodyChars = set(bodyChars) - excludeChars - self.initCharsOrig = "".join(sorted(initChars)) - - if bodyChars: - self.bodyCharsOrig = "".join(sorted(bodyChars)) - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = "".join(sorted(initChars)) - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - # see if we can make a regex for this Word - if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0): - if self.bodyChars == self.initChars: - if max == 0: - repeat = "+" - elif max == 1: - repeat = "" - else: - repeat = "{{{},{}}}".format( - self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen - ) - self.reString = "[{}]{}".format( - _collapse_string_to_ranges(self.initChars), - repeat, - ) - elif len(self.initChars) == 1: - if max == 0: - repeat = "*" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "{}[{}]{}".format( - re.escape(self.initCharsOrig), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - else: - if max == 0: - repeat = "*" - elif max == 2: - repeat = "" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "[{}][{}]{}".format( - _collapse_string_to_ranges(self.initChars), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - if self.asKeyword: - self.reString = r"\b" + self.reString + r"\b" - - try: - self.re = re.compile(self.reString) - except re.error: - self.re = None - else: - self.re_match = self.re.match - self.__class__ = _WordRegex - - def _generateDefaultName(self): - def charsAsStr(s): - max_repr_len = 16 - s = _collapse_string_to_ranges(s, re_escape=False) - if len(s) > max_repr_len: - return s[: max_repr_len - 3] + "..." 
- else: - return s - - if self.initChars != self.bodyChars: - base = "W:({}, {})".format( - charsAsStr(self.initChars), charsAsStr(self.bodyChars) - ) - else: - base = "W:({})".format(charsAsStr(self.initChars)) - - # add length specification - if self.minLen > 1 or self.maxLen != _MAX_INT: - if self.minLen == self.maxLen: - if self.minLen == 1: - return base[2:] - else: - return base + "{{{}}}".format(self.minLen) - elif self.maxLen == _MAX_INT: - return base + "{{{},...}}".format(self.minLen) - else: - return base + "{{{},{}}}".format(self.minLen, self.maxLen) - return base - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword: - if ( - start > 0 - and instring[start - 1] in bodychars - or loc < instrlen - and instring[loc] in bodychars - ): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _WordRegex(Word): - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - -class Char(_WordRegex): - """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, - when defining a match of any single character in a string of - characters. - """ - - def __init__( - self, - charset: str, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__( - charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars - ) - self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars)) - if asKeyword: - self.reString = r"\b{}\b".format(self.reString) - self.re = re.compile(self.reString) - self.re_match = self.re.match - - -class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module `_. - If the given regex contains named groups (defined using ``(?P...)``), - these will be preserved as named :class:`ParseResults`. - - If instead of the Python stdlib ``re`` module you wish to use a different RE module - (such as the ``regex`` module), you can do so by building your ``Regex`` object with - a compiled RE that was compiled using ``regex``. 
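    A minimal illustrative sketch (the full matched text is returned as a single token)::

        ipv4 = Regex(r"\d{1,3}(\.\d{1,3}){3}")
        ipv4.parse_string("127.0.0.1")  # -> ['127.0.0.1']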
- - Example:: - - realnum = Regex(r"[+-]?\d+\.\d*") - # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - - # named fields in a regex will be returned as named results - date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') - - # the Regex class will accept re's compiled using the regex module - import regex - parser = pp.Regex(regex.compile(r'[0-9]')) - """ - - def __init__( - self, - pattern: Any, - flags: Union[re.RegexFlag, int] = 0, - as_group_list: bool = False, - as_match: bool = False, - *, - asGroupList: bool = False, - asMatch: bool = False, - ): - """The parameters ``pattern`` and ``flags`` are passed - to the ``re.compile()`` function as-is. See the Python - `re module `_ module for an - explanation of the acceptable patterns and flags. - """ - super().__init__() - asGroupList = asGroupList or as_group_list - asMatch = asMatch or as_match - - if isinstance(pattern, str_type): - if not pattern: - raise ValueError("null string passed to Regex; use Empty() instead") - - self._re = None - self.reString = self.pattern = pattern - self.flags = flags - - elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): - self._re = pattern - self.pattern = self.reString = pattern.pattern - self.flags = flags - - else: - raise TypeError( - "Regex may only be constructed with a string or a compiled RE object" - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asGroupList = asGroupList - self.asMatch = asMatch - if self.asGroupList: - self.parseImpl = self.parseImplAsGroupList - if self.asMatch: - self.parseImpl = self.parseImplAsMatch - - @cached_property - def re(self): - if self._re: - return self._re - else: - try: - return re.compile(self.pattern, self.flags) - except re.error: - raise ValueError( - "invalid pattern ({!r}) passed to Regex".format(self.pattern) - ) - - @cached_property - def re_match(self): - return self.re.match - - @cached_property - def mayReturnEmpty(self): - return self.re_match("") is not None - - def _generateDefaultName(self): - return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\")) - - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc, ret - - def parseImplAsGroupList(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.groups() - return loc, ret - - def parseImplAsMatch(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result - return loc, ret - - def sub(self, repl: str) -> ParserElement: - r""" - Return :class:`Regex` with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) `_. - - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") - print(make_html.transform_string("h1:main title:")) - # prints "

<h1>main title</h1>
" - """ - if self.asGroupList: - raise TypeError("cannot use sub() with Regex(asGroupList=True)") - - if self.asMatch and callable(repl): - raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)") - - if self.asMatch: - - def pa(tokens): - return tokens[0].expand(repl) - - else: - - def pa(tokens): - return self.re.sub(repl, tokens[0]) - - return self.add_parse_action(pa) - - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - ``quote_char`` - string of one or more characters defining the - quote delimiting string - - ``esc_char`` - character to re_escape quotes, typically backslash - (default= ``None``) - - ``esc_quote`` - special quote sequence to re_escape an embedded quote - string (such as SQL's ``""`` to re_escape an embedded ``"``) - (default= ``None``) - - ``multiline`` - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - ``unquote_results`` - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - ``end_quote_char`` - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quote_char) - - ``convert_whitespace_escapes`` - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.search_string('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', end_quote_char='}}') - print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', esc_quote='""') - print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")) - - def __init__( - self, - quote_char: str = "", - esc_char: typing.Optional[str] = None, - esc_quote: typing.Optional[str] = None, - multiline: bool = False, - unquote_results: bool = True, - end_quote_char: typing.Optional[str] = None, - convert_whitespace_escapes: bool = True, - *, - quoteChar: str = "", - escChar: typing.Optional[str] = None, - escQuote: typing.Optional[str] = None, - unquoteResults: bool = True, - endQuoteChar: typing.Optional[str] = None, - convertWhitespaceEscapes: bool = True, - ): - super().__init__() - escChar = escChar or esc_char - escQuote = escQuote or esc_quote - unquoteResults = unquoteResults and unquote_results - endQuoteChar = endQuoteChar or end_quote_char - convertWhitespaceEscapes = ( - convertWhitespaceEscapes and convert_whitespace_escapes - ) - quote_char = quoteChar or quote_char - - # remove white space from quote chars - wont work anyway - quote_char = quote_char.strip() - if not quote_char: - raise ValueError("quote_char cannot be the empty string") - - if endQuoteChar is None: - endQuoteChar = quote_char - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - raise ValueError("endQuoteChar cannot be the empty string") - - self.quoteChar = quote_char - self.quoteCharLen = len(quote_char) - self.firstQuoteChar = quote_char[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - sep = "" - inner_pattern = "" - - if escQuote: - 
inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote)) - sep = "|" - - if escChar: - inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar)) - sep = "|" - self.escCharReplacePattern = re.escape(self.escChar) + "(.)" - - if len(self.endQuoteChar) > 1: - inner_pattern += ( - "{}(?:".format(sep) - + "|".join( - "(?:{}(?!{}))".format( - re.escape(self.endQuoteChar[:i]), - re.escape(self.endQuoteChar[i:]), - ) - for i in range(len(self.endQuoteChar) - 1, 0, -1) - ) - + ")" - ) - sep = "|" - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - inner_pattern += r"{}(?:[^{}{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - else: - self.flags = 0 - inner_pattern += r"{}(?:[^{}\n\r{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - - self.pattern = "".join( - [ - re.escape(self.quoteChar), - "(?:", - inner_pattern, - ")*", - re.escape(self.endQuoteChar), - ] - ) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - self.re_match = self.re.match - except re.error: - raise ValueError( - "invalid pattern {!r} passed to Regex".format(self.pattern) - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def _generateDefaultName(self): - if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): - return "string enclosed in {!r}".format(self.quoteChar) - - return "quoted string, starting with {} ending with {}".format( - self.quoteChar, self.endQuoteChar - ) - - def parseImpl(self, instring, loc, doActions=True): - result = ( - instring[loc] == self.firstQuoteChar - and self.re_match(instring, loc) - or None - ) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen : -self.endQuoteCharLen] - - if isinstance(ret, str_type): - # replace escaped whitespace - if "\\" in ret and self.convertWhitespaceEscapes: - for wslit, wschar in self.ws_map: - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. 
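    An extra sketch of the length arguments (illustrative only)::

        CharsNotIn(",", max=4).parse_string("abcdefg")  # -> ['abcd']
        CharsNotIn(",", exact=3).parse_string("ab,cd")  # -> Exception: only 2 chars before ','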
- - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - - def __init__( - self, - not_chars: str = "", - min: int = 1, - max: int = 0, - exact: int = 0, - *, - notChars: str = "", - ): - super().__init__() - self.skipWhitespace = False - self.notChars = not_chars or notChars - self.notCharsSet = set(self.notChars) - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use " - "Opt(CharsNotIn()) if zero-length char group is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = self.minLen == 0 - self.mayIndexError = False - - def _generateDefaultName(self): - not_chars_str = _collapse_string_to_ranges(self.notChars) - if len(not_chars_str) > 16: - return "!W:({}...)".format(self.notChars[: 16 - 3]) - else: - return "!W:({})".format(self.notChars) - - def parseImpl(self, instring, loc, doActions=True): - notchars = self.notCharsSet - if instring[loc] in notchars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. 
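    A small illustrative sketch, assuming a tab is made significant::

        tab = White("\t")
        (Word(alphas) + tab + Word(alphas)).parse_string("abc\tdef")
        # -> ['abc', '\t', 'def']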
- """ - - whiteStrs = { - " ": "", - "\t": "", - "\n": "", - "\r": "", - "\f": "", - "\u00A0": "", - "\u1680": "", - "\u180E": "", - "\u2000": "", - "\u2001": "", - "\u2002": "", - "\u2003": "", - "\u2004": "", - "\u2005": "", - "\u2006": "", - "\u2007": "", - "\u2008": "", - "\u2009": "", - "\u200A": "", - "\u200B": "", - "\u202F": "", - "\u205F": "", - "\u3000": "", - } - - def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): - super().__init__() - self.matchWhite = ws - self.set_whitespace_chars( - "".join(c for c in self.whiteStrs if c not in self.matchWhite), - copy_defaults=True, - ) - # self.leave_whitespace() - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def _generateDefaultName(self): - return "".join(White.whiteStrs[c] for c in self.matchWhite) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.matchWhite: - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min(maxloc, len(instring)) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class PositionToken(Token): - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class GoToColumn(PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. - """ - - def __init__(self, colno: int): - super().__init__() - self.col = colno - - def preParse(self, instring, loc): - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while ( - loc < instrlen - and instring[loc].isspace() - and col(loc, instring) != self.col - ): - loc += 1 - return loc - - def parseImpl(self, instring, loc, doActions=True): - thiscol = col(loc, instring) - if thiscol > self.col: - raise ParseException(instring, loc, "Text not in expected column", self) - newloc = loc + self.col - thiscol - ret = instring[loc:newloc] - return newloc, ret - - -class LineStart(PositionToken): - r"""Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self): - super().__init__() - self.leave_whitespace() - self.orig_whiteChars = set() | self.whiteChars - self.whiteChars.discard("\n") - self.skipper = Empty().set_whitespace_chars(self.whiteChars) - self.errmsg = "Expected start of line" - - def preParse(self, instring, loc): - if loc == 0: - return loc - else: - ret = self.skipper.preParse(instring, loc) - if "\n" in self.orig_whiteChars: - while instring[ret : ret + 1] == "\n": - ret = self.skipper.preParse(instring, ret + 1) - return ret - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - - -class LineEnd(PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - - 
def __init__(self): - super().__init__() - self.whiteChars.discard("\n") - self.set_whitespace_chars(self.whiteChars, copy_defaults=False) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class StringStart(PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class StringEnd(PositionToken): - """ - Matches if current position is at the end of the parse string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class WordStart(PositionToken): - """Matches if the current position is at the beginning of a - :class:`Word`, and is not preceded by any character in a given - set of ``word_chars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if ( - instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class WordEnd(PositionToken): - """Matches if the current position is at the end of a :class:`Word`, - and is not followed by any character in a given set of ``word_chars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if ( - instring[loc] in self.wordChars - or instring[loc - 1] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
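    Concrete subclasses are normally built with operators rather than constructed
    directly; a rough summary of the mapping::

        Word(alphas) + Word(nums)   # And
        Word(alphas) | Word(nums)   # MatchFirst
        Word(alphas) ^ Word(nums)   # Or
        Word(alphas) & Word(nums)   # Each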
- """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(savelist) - self.exprs: List[ParserElement] - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, str_type): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, str_type) for expr in exprs): - exprs = ( - self._literalStringClass(e) if isinstance(e, str_type) else e - for e in exprs - ) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def recurse(self) -> Sequence[ParserElement]: - return self.exprs[:] - - def append(self, other) -> ParserElement: - self.exprs.append(other) - self._defaultName = None - return self - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().leave_whitespace(recursive) - - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().ignore_whitespace(recursive) - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def _generateDefaultName(self): - return "{}:({})".format(self.__class__.__name__, str(self.exprs)) - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) - if len(self.exprs) == 2: - other = self.exprs[0] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = other.exprs[:] + [self.exprs[1]] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + str(self) - - return self - - def validate(self, validateTrace=None) -> None: - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - self._checkRecursion([]) - - def copy(self) -> ParserElement: - ret = super().copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - - 
def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in self.exprs: - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class And(ParseExpression): - """ - Requires all given :class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = Word(alphas)[1, ...] - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.leave_whitespace() - - def _generateDefaultName(self): - return "-" - - def __init__( - self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True - ): - exprs: List[ParserElement] = list(exprs_arg) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception( - "cannot construct And with sequence ending in ..." 
- ) - else: - tmp.append(expr) - exprs[:] = tmp - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - if not isinstance(self.exprs[0], White): - self.set_whitespace_chars( - self.exprs[0].whiteChars, - copy_defaults=self.exprs[0].copyDefaultWhiteChars, - ) - self.skipWhitespace = self.exprs[0].skipWhitespace - else: - self.skipWhitespace = False - else: - self.mayReturnEmpty = True - self.callPreparse = True - - def streamline(self) -> ParserElement: - # collapse any _PendingSkip's - if self.exprs: - if any( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1] - ): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if ( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - ): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super().streamline() - - # link any IndentedBlocks to the prior expression - for prev, cur in zip(self.exprs, self.exprs[1:]): - # traverse cur or any first embedded expr of cur looking for an IndentedBlock - # (but watch out for recursive grammar) - seen = set() - while cur: - if id(cur) in seen: - break - seen.add(id(cur)) - if isinstance(cur, IndentedBlock): - prev.add_parse_action( - lambda s, l, t, cur_=cur: setattr( - cur_, "parent_anchor", col(l, s) - ) - ) - break - subs = cur.recurse() - cur = next(iter(subs), None) - - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as callPreParse arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( - instring, loc, doActions, callPreParse=False - ) - errorStop = False - for e in self.exprs[1:]: - # if isinstance(e, And._ErrorStop): - if type(e) is And._ErrorStop: - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException( - instring, len(instring), self.errmsg, self - ) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e._checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def _generateDefaultName(self): - inner = " ".join(str(e) for e in self.exprs) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "{" + inner + "}" - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. - - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) - print(number.search_string("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - fatals = [] - if all(e.callPreparse for e in self.exprs): - loc = self.preParse(instring, loc) - for e in self.exprs: - try: - loc2 = e.try_parse(instring, loc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - maxException = None - maxExcLoc = -1 - except ParseException as err: - if not fatals: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. 
- matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ixor__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def _generateDefaultName(self): - return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - more than one expression matches, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - if self.exprs: - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - - for e in self.exprs: - try: - return e._parse( - instring, - loc, - doActions, - ) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - raise - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ior__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def _generateDefaultName(self): - return "{" + " | ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
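    A smaller illustrative case first (order-independent matching of two required keywords)::

        flags = Keyword("left")("l") & Keyword("right")("r")
        flags.parse_string("right left")  # succeeds for either word order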
- - Example:: - - color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) - - shape_spec.run_tests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict( - (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) - ) - opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] - opt2 = [ - e - for e in self.exprs - if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) - ] - self.optionals = opt1 + opt2 - self.multioptionals = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, _MultipleMatch) - ] - self.multirequired = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, OneOrMore) - ] - self.required = [ - e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) - ] - self.required += self.multirequired - self.initExprGroups = False - - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - multis = self.multioptionals[:] - matchOrder = [] - - keepMatching = True - failed = [] - fatals = [] - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + multis - failed.clear() - fatals.clear() - for e in tmpExprs: - try: - tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - failed.append(e) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - # 
look for any ParseFatalExceptions - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if tmpReqd: - missing = ", ".join([str(e) for e in tmpReqd]) - raise ParseException( - instring, - loc, - "Missing one or more required elements ({})".format(missing), - ) - - # add any unmatched Opts, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] - - total_results = ParseResults([]) - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - total_results += results - - return loc, total_results - - def _generateDefaultName(self): - return "{" + " & ".join(str(e) for e in self.exprs) + "}" - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - super().__init__(savelist) - if isinstance(expr, str_type): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - elif issubclass(type(self), self._literalStringClass): - expr = Literal(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.set_whitespace_chars( - expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars - ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def recurse(self) -> Sequence[ParserElement]: - return [self.expr] if self.expr is not None else [] - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException(instring, loc, "No expression defined", self) - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - super().leave_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - super().ignore_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self) -> ParserElement: - super().streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def _checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def _generateDefaultName(self): - 
return "{}:({})".format(self.__class__.__name__, str(self.expr)) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class IndentedBlock(ParseElementEnhance): - """ - Expression to match one or more expressions at a given indentation level. - Useful for parsing text where structure is implied by indentation (like Python source code). - """ - - class _Indent(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) == ref_col) - - class _IndentGreater(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column greater than {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) > ref_col) - - def __init__( - self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True - ): - super().__init__(expr, savelist=True) - # if recursive: - # raise NotImplementedError("IndentedBlock with recursive is not implemented") - self._recursive = recursive - self._grouped = grouped - self.parent_anchor = 1 - - def parseImpl(self, instring, loc, doActions=True): - # advance parse position to non-whitespace by using an Empty() - # this should be the column to be used for all subsequent indented lines - anchor_loc = Empty().preParse(instring, loc) - - # see if self.expr matches at the current location - if not it will raise an exception - # and no further work is necessary - self.expr.try_parse(instring, anchor_loc, doActions) - - indent_col = col(anchor_loc, instring) - peer_detect_expr = self._Indent(indent_col) - - inner_expr = Empty() + peer_detect_expr + self.expr - if self._recursive: - sub_indent = self._IndentGreater(indent_col) - nested_block = IndentedBlock( - self.expr, recursive=self._recursive, grouped=self._grouped - ) - nested_block.set_debug(self.debug) - nested_block.parent_anchor = indent_col - inner_expr += Opt(sub_indent + nested_block) - - inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") - block = OneOrMore(inner_expr) - - trailing_undent = self._Indent(self.parent_anchor) | StringEnd() - - if self._grouped: - wrapper = Group - else: - wrapper = lambda expr: expr - return (wrapper(block) + Optional(trailing_undent)).parseImpl( - instring, anchor_loc, doActions - ) - - -class AtStringStart(ParseElementEnhance): - """Matches if expression matches at the beginning of the parse - string:: - - AtStringStart(Word(nums)).parse_string("123") - # prints ["123"] - - AtStringStart(Word(nums)).parse_string(" 123") - # raises ParseException - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - raise ParseException(instring, loc, "not found at string start") - return super().parseImpl(instring, loc, doActions) - - -class AtLineStart(ParseElementEnhance): - r"""Matches if an expression matches at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (AtLineStart('AAA') + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) != 1: - raise 
ParseException(instring, loc, "not found at line start") - return super().parseImpl(instring, loc, doActions) - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. - - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, :class:`Literal`, - :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` - with a specified exact or maximum length, then the retreat - parameter is not required. Otherwise, retreat must be specified to - give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
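A rough sketch of that variable-length case (not part of the removed file; the sample text and the ``unit`` name are invented), complementing the fixed-length example below::

    import pyparsing as pp

    # Word(nums) has no fixed length, so an explicit retreat bounds the lookbehind window
    unit = pp.PrecededBy(pp.Word(pp.nums), retreat=10) + pp.Word(pp.alphas)("unit")
    for tokens in unit.search_string("250ml 3kg"):
        print(tokens)  # roughly -> ['ml'] then ['kg']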
- - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - - def __init__( - self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None - ): - super().__init__(expr) - self.expr = self.expr().leave_whitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str_type): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat) : loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1) + 1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse( - instring_slice, len(instring_slice) - offset - ) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - else: - raise last_expr - return loc, ret - - -class Located(ParseElementEnhance): - """ - Decorates a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parse_with_tabs` - - Example:: - - wd = Word(alphas) - for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [0, ['ljsdf'], 5] - [8, ['lksdjjf'], 15] - [18, ['lkkjj'], 23] - - """ - - def parseImpl(self, instring, loc, doActions=True): - start = loc - loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False) - ret_tokens = ParseResults([start, tokens, loc]) - ret_tokens["locn_start"] = start - ret_tokens["value"] = tokens - ret_tokens["locn_end"] = loc - if self.resultsName: - # must return as a list, so that the name will be attached to the complete group - return loc, [ret_tokens] - else: - return loc, ret_tokens - - -class NotAny(ParseElementEnhance): - """ - Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the ``'~'`` operator. 
- - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Opt(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infix_notation - boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] - - # integers that are followed by "." are actually floats - integer = Word(nums) + ~Char(".") - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - # do NOT use self.leave_whitespace(), don't want to propagate to exprs - # self.leave_whitespace() - self.skipWhitespace = False - - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.can_parse_next(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def _generateDefaultName(self): - return "~{" + str(self.expr) + "}" - - -class _MultipleMatch(ParseElementEnhance): - def __init__( - self, - expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr) - stopOn = stopOn or stop_on - self.saveAsList = True - ender = stopOn - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender) -> ParserElement: - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions) - try: - hasIgnoreExprs = not not self.ignoreExprs - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in [self.expr] + self.expr.recurse(): - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """ - Repetition of one or more of the given expression. 
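As a minimal illustrative sketch (inputs invented), ``OneOrMore(expr)`` and the ``expr[1, ...]`` shorthand are equivalent spellings; the fuller ``stop_on`` example follows below::

    import pyparsing as pp

    word = pp.Word(pp.alphas)
    print(pp.OneOrMore(word).parse_string("a b c"))  # -> ['a', 'b', 'c']
    print(word[1, ...].parse_string("a b c"))        # equivalent shorthand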
- - Parameters: - - expr - expression that must match one or more times - - stop_on - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stop_on attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parse_string(text).pprint() - """ - - def _generateDefaultName(self): - return "{" + str(self.expr) + "}..." - - -class ZeroOrMore(_MultipleMatch): - """ - Optional repetition of zero or more of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``stop_on`` - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - (default= ``None``) - - Example: similar to :class:`OneOrMore` - """ - - def __init__( - self, - expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr, stopOn=stopOn or stop_on) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super().parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, ParseResults([], name=self.resultsName) - - def _generateDefaultName(self): - return "[" + str(self.expr) + "]..." - - -class _NullToken: - def __bool__(self): - return False - - def __str__(self): - return "" - - -class Opt(ParseElementEnhance): - """ - Optional matching of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``default`` (optional) - value to be returned if the optional expression is not found. 
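An illustrative sketch of the ``default`` parameter (the host/port names and values here are invented); the original ZIP-code example follows::

    import pyparsing as pp

    # when the optional ":port" piece is absent, the default token is returned instead
    port = pp.Opt(pp.Suppress(":") + pp.Word(pp.nums), default="80")("port")
    endpoint = pp.Word(pp.alphanums + ".")("host") + port
    print(endpoint.parse_string("example.com"))       # -> ['example.com', '80']
    print(endpoint.parse_string("example.com:8080"))  # -> ['example.com', '8080']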
- - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) - zip.run_tests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - - __optionalNotMatched = _NullToken() - - def __init__( - self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched - ): - super().__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - self_expr = self.expr - try: - loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - default_value = self.defaultValue - if default_value is not self.__optionalNotMatched: - if self_expr.resultsName: - tokens = ParseResults([default_value]) - tokens[self_expr.resultsName] = default_value - else: - tokens = [default_value] - else: - tokens = [] - return loc, tokens - - def _generateDefaultName(self): - inner = str(self.expr) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "[" + inner + "]" - - -Optional = Opt - - -class SkipTo(ParseElementEnhance): - """ - Token for skipping over all undefined text until the matched - expression is found. - - Parameters: - - ``expr`` - target expression marking the end of the data to be skipped - - ``include`` - if ``True``, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element - list) (default= ``False``). 
- - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be - included in the skipped test; if found before the target expression is found, - the :class:`SkipTo` is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quoted_string) - string_data.set_parse_action(token_map(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.search_string(report): - print tkt.dump() - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: '6' - - desc: 'Intermittent system crash' - - issue_num: '101' - - sev: 'Critical' - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: '14' - - desc: "Spelling error on Login ('log|n')" - - issue_num: '94' - - sev: 'Cosmetic' - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: '47' - - desc: 'System slow when running too many reports' - - issue_num: '79' - - sev: 'Minor' - """ - - def __init__( - self, - other: Union[ParserElement, str], - include: bool = False, - ignore: bool = None, - fail_on: typing.Optional[Union[ParserElement, str]] = None, - *, - failOn: Union[ParserElement, str] = None, - ): - super().__init__(other) - failOn = failOn or fail_on - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, str_type): - self.failOn = self._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - startloc = loc - instrlen = len(instring) - self_expr_parse = self.expr._parse - self_failOn_canParseNext = ( - self.failOn.canParseNext if self.failOn is not None else None - ) - self_ignoreExpr_tryParse = ( - self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - ) - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return values - loc = tmploc 
- skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - - -class Forward(ParseElementEnhance): - """ - Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the ``'<<'`` operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: - - fwd_expr << a | b | c - - will actually be evaluated as:: - - (fwd_expr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwd_expr << (a | b | c) - - Converting to use the ``'<<='`` operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - - def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): - self.caller_frame = traceback.extract_stack(limit=2)[0] - super().__init__(other, savelist=False) - self.lshift_line = None - - def __lshift__(self, other): - if hasattr(self, "caller_frame"): - del self.caller_frame - if isinstance(other, str_type): - other = self._literalStringClass(other) - self.expr = other - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.set_whitespace_chars( - self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars - ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - self.lshift_line = traceback.extract_stack(limit=2)[-2] - return self - - def __ilshift__(self, other): - return self << other - - def __or__(self, other): - caller_line = traceback.extract_stack(limit=2)[-2] - if ( - __diag__.warn_on_match_first_with_lshift_operator - and caller_line == self.lshift_line - and Diagnostics.warn_on_match_first_with_lshift_operator - not in self.suppress_warnings_ - ): - warnings.warn( - "using '<<' operator with '|' is probably an error, use '<<='", - stacklevel=2, - ) - ret = super().__or__(other) - return ret - - def __del__(self): - # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' - if ( - self.expr is None - and __diag__.warn_on_assignment_to_Forward - and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ - ): - warnings.warn_explicit( - "Forward defined here but no expression attached later using '<<=' or '<<'", - UserWarning, - filename=self.caller_frame.filename, - lineno=self.caller_frame.lineno, - ) - - def parseImpl(self, instring, loc, doActions=True): - if ( - self.expr is None - and __diag__.warn_on_parse_using_empty_Forward - and Diagnostics.warn_on_parse_using_empty_Forward - not in self.suppress_warnings_ - ): - # walk stack until parse_string, scan_string, search_string, or transform_string is found - parse_fns = [ - "parse_string", - "scan_string", - "search_string", - "transform_string", - ] - tb = traceback.extract_stack(limit=200) - for i, frm in enumerate(reversed(tb), start=1): - if frm.name in parse_fns: - stacklevel = i + 1 - break - else: - stacklevel = 2 - warnings.warn( - "Forward expression was never assigned a value, will not parse any 
input", - stacklevel=stacklevel, - ) - if not ParserElement._left_recursion_enabled: - return super().parseImpl(instring, loc, doActions) - # ## Bounded Recursion algorithm ## - # Recursion only needs to be processed at ``Forward`` elements, since they are - # the only ones that can actually refer to themselves. The general idea is - # to handle recursion stepwise: We start at no recursion, then recurse once, - # recurse twice, ..., until more recursion offers no benefit (we hit the bound). - # - # The "trick" here is that each ``Forward`` gets evaluated in two contexts - # - to *match* a specific recursion level, and - # - to *search* the bounded recursion level - # and the two run concurrently. The *search* must *match* each recursion level - # to find the best possible match. This is handled by a memo table, which - # provides the previous match to the next level match attempt. - # - # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. - # - # There is a complication since we not only *parse* but also *transform* via - # actions: We do not want to run the actions too often while expanding. Thus, - # we expand using `doActions=False` and only run `doActions=True` if the next - # recursion level is acceptable. - with ParserElement.recursion_lock: - memo = ParserElement.recursion_memos - try: - # we are parsing at a specific recursion expansion - use it as-is - prev_loc, prev_result = memo[loc, self, doActions] - if isinstance(prev_result, Exception): - raise prev_result - return prev_loc, prev_result.copy() - except KeyError: - act_key = (loc, self, True) - peek_key = (loc, self, False) - # we are searching for the best recursion expansion - keep on improving - # both `doActions` cases must be tracked separately here! - prev_loc, prev_peek = memo[peek_key] = ( - loc - 1, - ParseException( - instring, loc, "Forward recursion without base case", self - ), - ) - if doActions: - memo[act_key] = memo[peek_key] - while True: - try: - new_loc, new_peek = super().parseImpl(instring, loc, False) - except ParseException: - # we failed before getting any match – do not hide the error - if isinstance(prev_peek, Exception): - raise - new_loc, new_peek = prev_loc, prev_peek - # the match did not get better: we are done - if new_loc <= prev_loc: - if doActions: - # replace the match for doActions=False as well, - # in case the action did backtrack - prev_loc, prev_result = memo[peek_key] = memo[act_key] - del memo[peek_key], memo[act_key] - return prev_loc, prev_result.copy() - del memo[peek_key] - return prev_loc, prev_peek.copy() - # the match did get better: see if we can improve further - else: - if doActions: - try: - memo[act_key] = super().parseImpl(instring, loc, True) - except ParseException as e: - memo[peek_key] = memo[act_key] = (new_loc, e) - raise - prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = False - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = True - return self - - def streamline(self) -> ParserElement: - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None) -> None: - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def 
_generateDefaultName(self): - # Avoid infinite recursion by setting a temporary _defaultName - self._defaultName = ": ..." - - # Use the string representation of main expression. - retString = "..." - try: - if self.expr is not None: - retString = str(self.expr)[:1000] - else: - retString = "None" - finally: - return self.__class__.__name__ + ": " + retString - - def copy(self) -> ParserElement: - if self.expr is not None: - return super().copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, list_all_matches=False): - if ( - __diag__.warn_name_set_on_empty_Forward - and Diagnostics.warn_name_set_on_empty_Forward - not in self.suppress_warnings_ - ): - if self.expr is None: - warnings.warn( - "{}: setting results name {!r} on {} expression " - "that has no contained expression".format( - "warn_name_set_on_empty_Forward", name, type(self).__name__ - ), - stacklevel=3, - ) - - return super()._setResultsName(name, list_all_matches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist=False): - super().__init__(expr) # , savelist) - self.saveAsList = False - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parse_string('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) - """ - - def __init__( - self, - expr: ParserElement, - join_string: str = "", - adjacent: bool = True, - *, - joinString: typing.Optional[str] = None, - ): - super().__init__(expr) - joinString = joinString if joinString is not None else join_string - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leave_whitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other) -> ParserElement: - if self.adjacent: - ParserElement.ignore(self, other) - else: - super().ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults( - ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults - ) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - The optional ``aslist`` argument when set to True will return the - parsed tokens as a Python list instead of a pyparsing ParseResults. 
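A small sketch of the ``aslist`` switch (illustrative only, not from the removed source)::

    import pyparsing as pp

    # with aslist=True the grouped tokens come back as a plain Python list
    grouped = pp.Group(pp.Word(pp.nums)[1, ...], aslist=True)
    result = grouped.parse_string("1 2 3")
    print(result[0], isinstance(result[0], list))  # -> ['1', '2', '3'] True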
- - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Opt(delimited_list(term)) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Opt(delimited_list(term))) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', ['a', 'b', '100']] - """ - - def __init__(self, expr: ParserElement, aslist: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonList = aslist - - def postParse(self, instring, loc, tokenlist): - if self._asPythonList: - return ParseResults.List( - tokenlist.asList() - if isinstance(tokenlist, ParseResults) - else list(tokenlist) - ) - else: - return [tokenlist] - - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. - - The optional ``asdict`` argument when set to True will return the - parsed tokens as a Python dict instead of a pyparsing ParseResults. - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - # print attributes as plain groups - print(attr_expr[1, ...].parse_string(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names - result = Dict(Group(attr_expr)[1, ...]).parse_string(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.as_dict()) - - prints:: - - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - - See more examples at :class:`ParseResults` of accessing fields by results name. - """ - - def __init__(self, expr: ParserElement, asdict: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonDict = asdict - - def postParse(self, instring, loc, tokenlist): - for i, tok in enumerate(tokenlist): - if len(tok) == 0: - continue - - ikey = tok[0] - if isinstance(ikey, int): - ikey = str(ikey).strip() - - if len(tok) == 1: - tokenlist[ikey] = _ParseResultsWithOffset("", i) - - elif len(tok) == 2 and not isinstance(tok[1], ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) - - else: - try: - dictvalue = tok.copy() # ParseResults(i) - except Exception: - exc = TypeError( - "could not extract dict values from parsed results" - " - Dict expression must contain Grouped expressions" - ) - raise exc from None - - del dictvalue[0] - - if len(dictvalue) != 1 or ( - isinstance(dictvalue, ParseResults) and dictvalue.haskeys() - ): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) - - if self._asPythonDict: - return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() - else: - return [tokenlist] if self.resultsName else tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression. 
- - Example:: - - source = "a, b, c,d" - wd = Word(alphas) - wd_list1 = wd + (',' + wd)[...] - print(wd_list1.parse_string(source)) - - # often, delimiters that are useful during parsing are just in the - # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + (Suppress(',') + wd)[...] - print(wd_list2.parse_string(source)) - - # Skipped text (using '...') can be suppressed as well - source = "lead in START relevant text END trailing text" - start_marker = Keyword("START") - end_marker = Keyword("END") - find_body = Suppress(...) + start_marker + ... + end_marker - print(find_body.parse_string(source) - - prints:: - - ['a', ',', 'b', ',', 'c', ',', 'd'] - ['a', 'b', 'c', 'd'] - ['START', 'relevant text ', 'END'] - - (See also :class:`delimited_list`.) - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - if expr is ...: - expr = _PendingSkip(NoMatch()) - super().__init__(expr) - - def __add__(self, other) -> "ParserElement": - if isinstance(self.expr, _PendingSkip): - return Suppress(SkipTo(other)) + other - else: - return super().__add__(other) - - def __sub__(self, other) -> "ParserElement": - if isinstance(self.expr, _PendingSkip): - return Suppress(SkipTo(other)) - other - else: - return super().__sub__(other) - - def postParse(self, instring, loc, tokenlist): - return [] - - def suppress(self) -> ParserElement: - return self - - -def trace_parse_action(f: ParseAction) -> ParseAction: - """Decorator for debugging parse actions. - - When the parse action is called, this decorator will print - ``">> entering method-name(line:, , )"``. - When the parse action completes, the decorator will print - ``"<<"`` followed by the returned value, or any exception that the parse action raised. - - Example:: - - wd = Word(alphas) - - @trace_parse_action - def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens)))) - - wds = wd[1, ...].set_parse_action(remove_duplicate_chars) - print(wds.parse_string("slkdjs sld sldd sdlf sdljf")) - - prints:: - - >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) - < 3: - thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc - sys.stderr.write( - ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t) - ) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write("< str: - r"""Helper to easily define string ranges for use in :class:`Word` - construction. Borrows syntax from regexp ``'[]'`` string range - definitions:: - - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - - The input string must be enclosed in []'s, and the returned string - is the expanded character set joined into a single string. The - values enclosed in the []'s may be: - - - a single character - - an escaped character with a leading backslash (such as ``\-`` - or ``\]``) - - an escaped hex character with a leading ``'\x'`` - (``\x21``, which is a ``'!'`` character) (``\0x##`` - is also supported for backwards compatibility) - - an escaped octal character with a leading ``'\0'`` - (``\041``, which is a ``'!'`` character) - - a range of any of the above, separated by a dash (``'a-z'``, - etc.) - - any combination of the above (``'aeiouy'``, - ``'a-zA-Z0-9_$'``, etc.) 
- """ - _expanded = ( - lambda p: p - if not isinstance(p, ParseResults) - else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) - ) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) - except Exception: - return "" - - -def token_map(func, *args) -> ParseAction: - """Helper to define a parse action by mapping a function to all - elements of a :class:`ParseResults` list. If any additional args are passed, - they are forwarded to the given function as additional arguments - after the token, as in - ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, - which will convert the parsed data to an integer using base 16. - - Example (compare the last to example in :class:`ParserElement.transform_string`:: - - hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) - hex_ints.run_tests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).set_parse_action(token_map(str.upper)) - upperword[1, ...].run_tests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).set_parse_action(token_map(str.title)) - wd[1, ...].set_parse_action(' '.join).run_tests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - - def pa(s, l, t): - return [func(tokn, *args) for tokn in t] - - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - pa.__name__ = func_name - - return pa - - -def autoname_elements() -> None: - """ - Utility to simplify mass-naming of parser elements, for - generating railroad diagram with named subdiagrams. 
- """ - for name, var in sys._getframe().f_back.f_locals.items(): - if isinstance(var, ParserElement) and not var.customName: - var.set_name(name) - - -dbl_quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' -).set_name("string enclosed in double quotes") - -sgl_quoted_string = Combine( - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("string enclosed in single quotes") - -quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("quotedString using single or double quotes") - -unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") - - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - -# backward compatibility names -tokenMap = token_map -conditionAsParseAction = condition_as_parse_action -nullDebugAction = null_debug_action -sglQuotedString = sgl_quoted_string -dblQuotedString = dbl_quoted_string -quotedString = quoted_string -unicodeString = unicode_string -lineStart = line_start -lineEnd = line_end -stringStart = string_start -stringEnd = string_end -traceParseAction = trace_parse_action diff --git a/lib/pkg_resources/_vendor/pyparsing/diagram/__init__.py b/lib/pkg_resources/_vendor/pyparsing/diagram/__init__.py deleted file mode 100644 index 89864475..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/diagram/__init__.py +++ /dev/null @@ -1,642 +0,0 @@ -import railroad -import pyparsing -import typing -from typing import ( - List, - NamedTuple, - Generic, - TypeVar, - Dict, - Callable, - Set, - Iterable, -) -from jinja2 import Template -from io import StringIO -import inspect - - -jinja2_template_source = """\ - - - - {% if not head %} - - {% else %} - {{ head | safe }} - {% endif %} - - -{{ body | safe }} -{% for diagram in diagrams %} -
-    <div class="railroad-group">
-        <h1 class="railroad-heading">{{ diagram.title }}</h1>
-        <div class="railroad-description">{{ diagram.text }}</div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
-    </div>
-{% endfor %} - - -""" - -template = Template(jinja2_template_source) - -# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet -NamedDiagram = NamedTuple( - "NamedDiagram", - [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)], -) -""" -A simple structure for associating a name with a railroad diagram -""" - -T = TypeVar("T") - - -class EachItem(railroad.Group): - """ - Custom railroad item to compose a: - - Group containing a - - OneOrMore containing a - - Choice of the elements in the Each - with the group label indicating that all must be matched - """ - - all_label = "[ALL]" - - def __init__(self, *items): - choice_item = railroad.Choice(len(items) - 1, *items) - one_or_more_item = railroad.OneOrMore(item=choice_item) - super().__init__(one_or_more_item, label=self.all_label) - - -class AnnotatedItem(railroad.Group): - """ - Simple subclass of Group that creates an annotation label - """ - - def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label) if label else label) - - -class EditablePartial(Generic[T]): - """ - Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been - constructed. - """ - - # We need this here because the railroad constructors actually transform the data, so can't be called until the - # entire tree is assembled - - def __init__(self, func: Callable[..., T], args: list, kwargs: dict): - self.func = func - self.args = args - self.kwargs = kwargs - - @classmethod - def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": - """ - If you call this function in the same way that you would call the constructor, it will store the arguments - as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) - """ - return EditablePartial(func=func, args=list(args), kwargs=kwargs) - - @property - def name(self): - return self.kwargs["name"] - - def __call__(self) -> T: - """ - Evaluate the partial and return the result - """ - args = self.args.copy() - kwargs = self.kwargs.copy() - - # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. 
- # args=['list', 'of', 'things']) - arg_spec = inspect.getfullargspec(self.func) - if arg_spec.varargs in self.kwargs: - args += kwargs.pop(arg_spec.varargs) - - return self.func(*args, **kwargs) - - -def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: - """ - Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams - :params kwargs: kwargs to be passed in to the template - """ - data = [] - for diagram in diagrams: - if diagram.diagram is None: - continue - io = StringIO() - diagram.diagram.writeSvg(io.write) - title = diagram.name - if diagram.index == 0: - title += " (root)" - data.append({"title": title, "text": "", "svg": io.getvalue()}) - - return template.render(diagrams=data, **kwargs) - - -def resolve_partial(partial: "EditablePartial[T]") -> T: - """ - Recursively resolves a collection of Partials into whatever type they are - """ - if isinstance(partial, EditablePartial): - partial.args = resolve_partial(partial.args) - partial.kwargs = resolve_partial(partial.kwargs) - return partial() - elif isinstance(partial, list): - return [resolve_partial(x) for x in partial] - elif isinstance(partial, dict): - return {key: resolve_partial(x) for key, x in partial.items()} - else: - return partial - - -def to_railroad( - element: pyparsing.ParserElement, - diagram_kwargs: typing.Optional[dict] = None, - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, -) -> List[NamedDiagram]: - """ - Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram - creation if you want to access the Railroad tree before it is converted to HTML - :param element: base element of the parser being diagrammed - :param diagram_kwargs: kwargs to pass to the Diagram() constructor - :param vertical: (optional) - int - limit at which number of alternatives should be - shown vertically instead of horizontally - :param show_results_names - bool to indicate whether results name annotations should be - included in the diagram - :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled - surrounding box - """ - # Convert the whole tree underneath the root - lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) - _to_diagram_element( - element, - lookup=lookup, - parent=None, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - root_id = id(element) - # Convert the root if it hasn't been already - if root_id in lookup: - if not element.customName: - lookup[root_id].name = "" - lookup[root_id].mark_for_extraction(root_id, lookup, force=True) - - # Now that we're finished, we can convert from intermediate structures into Railroad elements - diags = list(lookup.diagrams.values()) - if len(diags) > 1: - # collapse out duplicate diags with the same name - seen = set() - deduped_diags = [] - for d in diags: - # don't extract SkipTo elements, they are uninformative as subdiagrams - if d.name == "...": - continue - if d.name is not None and d.name not in seen: - seen.add(d.name) - deduped_diags.append(d) - resolved = [resolve_partial(partial) for partial in deduped_diags] - else: - # special case - if just one diagram, always display it, even if - # it has no name - resolved = [resolve_partial(partial) for partial in diags] - return sorted(resolved, key=lambda diag: diag.index) - - -def _should_vertical( - specification: int, exprs: Iterable[pyparsing.ParserElement] -) -> bool: - """ - Returns true if we should return a 
vertical list of elements - """ - if specification is None: - return False - else: - return len(_visible_exprs(exprs)) >= specification - - -class ElementState: - """ - State recorded for an individual pyparsing Element - """ - - # Note: this should be a dataclass, but we have to support Python 3.5 - def __init__( - self, - element: pyparsing.ParserElement, - converted: EditablePartial, - parent: EditablePartial, - number: int, - name: str = None, - parent_index: typing.Optional[int] = None, - ): - #: The pyparsing element that this represents - self.element: pyparsing.ParserElement = element - #: The name of the element - self.name: typing.Optional[str] = name - #: The output Railroad element in an unconverted state - self.converted: EditablePartial = converted - #: The parent Railroad element, which we store so that we can extract this if it's duplicated - self.parent: EditablePartial = parent - #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram - self.number: int = number - #: The index of this inside its parent - self.parent_index: typing.Optional[int] = parent_index - #: If true, we should extract this out into a subdiagram - self.extract: bool = False - #: If true, all of this element's children have been filled out - self.complete: bool = False - - def mark_for_extraction( - self, el_id: int, state: "ConverterState", name: str = None, force: bool = False - ): - """ - Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram - :param el_id: id of the element - :param state: element/diagram state tracker - :param name: name to use for this element's text - :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the - root element when we know we're finished - """ - self.extract = True - - # Set the name - if not self.name: - if name: - # Allow forcing a custom name - self.name = name - elif self.element.customName: - self.name = self.element.customName - else: - self.name = "" - - # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children - # to be added - # Also, if this is just a string literal etc, don't bother extracting it - if force or (self.complete and _worth_extracting(self.element)): - state.extract_into_diagram(el_id) - - -class ConverterState: - """ - Stores some state that persists between recursions into the element tree - """ - - def __init__(self, diagram_kwargs: typing.Optional[dict] = None): - #: A dictionary mapping ParserElements to state relating to them - self._element_diagram_states: Dict[int, ElementState] = {} - #: A dictionary mapping ParserElement IDs to subdiagrams generated from them - self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} - #: The index of the next unnamed element - self.unnamed_index: int = 1 - #: The index of the next element. 
This is used for sorting - self.index: int = 0 - #: Shared kwargs that are used to customize the construction of diagrams - self.diagram_kwargs: dict = diagram_kwargs or {} - self.extracted_diagram_names: Set[str] = set() - - def __setitem__(self, key: int, value: ElementState): - self._element_diagram_states[key] = value - - def __getitem__(self, key: int) -> ElementState: - return self._element_diagram_states[key] - - def __delitem__(self, key: int): - del self._element_diagram_states[key] - - def __contains__(self, key: int): - return key in self._element_diagram_states - - def generate_unnamed(self) -> int: - """ - Generate a number used in the name of an otherwise unnamed diagram - """ - self.unnamed_index += 1 - return self.unnamed_index - - def generate_index(self) -> int: - """ - Generate a number used to index a diagram - """ - self.index += 1 - return self.index - - def extract_into_diagram(self, el_id: int): - """ - Used when we encounter the same token twice in the same tree. When this - happens, we replace all instances of that token with a terminal, and - create a new subdiagram for the token - """ - position = self[el_id] - - # Replace the original definition of this element with a regular block - if position.parent: - ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) - if "item" in position.parent.kwargs: - position.parent.kwargs["item"] = ret - elif "items" in position.parent.kwargs: - position.parent.kwargs["items"][position.parent_index] = ret - - # If the element we're extracting is a group, skip to its content but keep the title - if position.converted.func == railroad.Group: - content = position.converted.kwargs["item"] - else: - content = position.converted - - self.diagrams[el_id] = EditablePartial.from_call( - NamedDiagram, - name=position.name, - diagram=EditablePartial.from_call( - railroad.Diagram, content, **self.diagram_kwargs - ), - index=position.number, - ) - - del self[el_id] - - -def _worth_extracting(element: pyparsing.ParserElement) -> bool: - """ - Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children - themselves have children, then its complex enough to extract - """ - children = element.recurse() - return any(child.recurse() for child in children) - - -def _apply_diagram_item_enhancements(fn): - """ - decorator to ensure enhancements to a diagram item (such as results name annotations) - get applied on return from _to_diagram_element (we do this since there are several - returns in _to_diagram_element) - """ - - def _inner( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, - ) -> typing.Optional[EditablePartial]: - - ret = fn( - element, - parent, - lookup, - vertical, - index, - name_hint, - show_results_names, - show_groups, - ) - - # apply annotation for results name, if present - if show_results_names and ret is not None: - element_results_name = element.resultsName - if element_results_name: - # add "*" to indicate if this is a "list all results" name - element_results_name += "" if element.modalResults else "*" - ret = EditablePartial.from_call( - railroad.Group, item=ret, label=element_results_name - ) - - return ret - - return _inner - - -def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): - non_diagramming_exprs = ( - pyparsing.ParseElementEnhance, - pyparsing.PositionToken, - pyparsing.And._ErrorStop, - ) - return [ - e - for e in exprs - if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) - ] - - -@_apply_diagram_item_enhancements -def _to_diagram_element( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, -) -> typing.Optional[EditablePartial]: - """ - Recursively converts a PyParsing Element to a railroad Element - :param lookup: The shared converter state that keeps track of useful things - :param index: The index of this element within the parent - :param parent: The parent of this element in the output tree - :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), - it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never - do so - :param name_hint: If provided, this will override the generated name - :param show_results_names: bool flag indicating whether to add annotations for results names - :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - :param show_groups: bool flag indicating whether to show groups using bounding box - """ - exprs = element.recurse() - name = name_hint or element.customName or element.__class__.__name__ - - # Python's id() is used to provide a unique identifier for elements - el_id = id(element) - - element_results_name = element.resultsName - - # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram - if not element.customName: - if isinstance( - element, - ( - # pyparsing.TokenConverter, - # pyparsing.Forward, - pyparsing.Located, - ), - ): - # However, if this element has a useful custom name, and its child does not, we can pass it on to the child - if exprs: - if not exprs[0].customName: - propagated_name = name - else: - propagated_name = None - - return _to_diagram_element( - element.expr, - parent=parent, - lookup=lookup, - vertical=vertical, - index=index, - name_hint=propagated_name, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # If the element isn't worth extracting, we always treat it as the first time we say it - if _worth_extracting(element): - if el_id in lookup: - # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, - # so we have to extract it into a new diagram. - looked_up = lookup[el_id] - looked_up.mark_for_extraction(el_id, lookup, name=name_hint) - ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) - return ret - - elif el_id in lookup.diagrams: - # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we - # just put in a marker element that refers to the sub-diagram - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - return ret - - # Recursively convert child elements - # Here we find the most relevant Railroad element for matching pyparsing Element - # We use ``items=[]`` here to hold the place for where the child elements will go once created - if isinstance(element, pyparsing.And): - # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat - # (all will have the same name, and resultsName) - if not exprs: - return None - if len(set((e.name, e.resultsName) for e in exprs)) == 1: - ret = EditablePartial.from_call( - railroad.OneOrMore, item="", repeat=str(len(exprs)) - ) - elif _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Stack, items=[]) - else: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): - if not exprs: - return None - if _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) - else: - ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) - elif isinstance(element, pyparsing.Each): - if not exprs: - return None - ret = EditablePartial.from_call(EachItem, items=[]) - elif isinstance(element, pyparsing.NotAny): - ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") - elif isinstance(element, pyparsing.FollowedBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", 
item="") - elif isinstance(element, pyparsing.PrecededBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Group): - if show_groups: - ret = EditablePartial.from_call(AnnotatedItem, label="", item="") - else: - ret = EditablePartial.from_call(railroad.Group, label="", item="") - elif isinstance(element, pyparsing.TokenConverter): - ret = EditablePartial.from_call( - AnnotatedItem, label=type(element).__name__.lower(), item="" - ) - elif isinstance(element, pyparsing.Opt): - ret = EditablePartial.from_call(railroad.Optional, item="") - elif isinstance(element, pyparsing.OneOrMore): - ret = EditablePartial.from_call(railroad.OneOrMore, item="") - elif isinstance(element, pyparsing.ZeroOrMore): - ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") - elif isinstance(element, pyparsing.Group): - ret = EditablePartial.from_call( - railroad.Group, item=None, label=element_results_name - ) - elif isinstance(element, pyparsing.Empty) and not element.customName: - # Skip unnamed "Empty" elements - ret = None - elif len(exprs) > 1: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif len(exprs) > 0 and not element_results_name: - ret = EditablePartial.from_call(railroad.Group, item="", label=name) - else: - terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) - ret = terminal - - if ret is None: - return - - # Indicate this element's position in the tree so we can extract it if necessary - lookup[el_id] = ElementState( - element=element, - converted=ret, - parent=parent, - parent_index=index, - number=lookup.generate_index(), - ) - if element.customName: - lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) - - i = 0 - for expr in exprs: - # Add a placeholder index in case we have to extract the child before we even add it to the parent - if "items" in ret.kwargs: - ret.kwargs["items"].insert(i, None) - - item = _to_diagram_element( - expr, - parent=ret, - lookup=lookup, - vertical=vertical, - index=i, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # Some elements don't need to be shown in the diagram - if item is not None: - if "item" in ret.kwargs: - ret.kwargs["item"] = item - elif "items" in ret.kwargs: - # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal - ret.kwargs["items"][i] = item - i += 1 - elif "items" in ret.kwargs: - # If we're supposed to skip this element, remove it from the parent - del ret.kwargs["items"][i] - - # If all this items children are none, skip this item - if ret and ( - ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) - or ("item" in ret.kwargs and ret.kwargs["item"] is None) - ): - ret = EditablePartial.from_call(railroad.Terminal, name) - - # Mark this element as "complete", ie it has all of its children - if el_id in lookup: - lookup[el_id].complete = True - - if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: - lookup.extract_into_diagram(el_id) - if ret is not None: - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - - return ret diff --git a/lib/pkg_resources/_vendor/pyparsing/exceptions.py b/lib/pkg_resources/_vendor/pyparsing/exceptions.py deleted file mode 100644 index a38447bb..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/exceptions.py +++ /dev/null @@ -1,267 +0,0 @@ -# exceptions.py - -import re -import sys -import typing - -from .util import col, line, 
lineno, _collapse_string_to_ranges -from .unicode import pyparsing_unicode as ppu - - -class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): - pass - - -_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) -_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") - - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, - pstr: str, - loc: int = 0, - msg: typing.Optional[str] = None, - elem=None, - ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parser_element = self.parserElement = elem - self.args = (pstr, loc, msg) - - @staticmethod - def explain_exception(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - """ - import inspect - from .core import ParserElement - - if depth is None: - depth = sys.getrecursionlimit() - ret = [] - if isinstance(exc, ParseBaseException): - ret.append(exc.line) - ret.append(" " * (exc.column - 1) + "^") - ret.append("{}: {}".format(type(exc).__name__, exc)) - - if depth > 0: - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for i, ff in enumerate(callers[-depth:]): - frm = ff[0] - - f_self = frm.f_locals.get("self", None) - if isinstance(f_self, ParserElement): - if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"): - continue - if id(f_self) in seen: - continue - seen.add(id(f_self)) - - self_type = type(f_self) - ret.append( - "{}.{} - {}".format( - self_type.__module__, self_type.__name__, f_self - ) - ) - - elif f_self is not None: - self_type = type(f_self) - ret.append("{}.{}".format(self_type.__module__, self_type.__name__)) - - else: - code = frm.f_code - if code.co_name in ("wrapper", ""): - continue - - ret.append("{}".format(code.co_name)) - - depth -= 1 - if not depth: - break - - return "\n".join(ret) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - @property - def line(self) -> str: - """ - Return the line of text where the exception occurred. - """ - return line(self.loc, self.pstr) - - @property - def lineno(self) -> int: - """ - Return the 1-based line number of text where the exception occurred. - """ - return lineno(self.loc, self.pstr) - - @property - def col(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. 
- """ - return col(self.loc, self.pstr) - - @property - def column(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - def __str__(self) -> str: - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ", found end of text" - else: - # pull out next word at error location - found_match = _exception_word_extractor.match(self.pstr, self.loc) - if found_match is not None: - found = found_match.group(0) - else: - found = self.pstr[self.loc : self.loc + 1] - foundstr = (", found %r" % found).replace(r"\\", "\\") - else: - foundstr = "" - return "{}{} (at char {}), (line:{}, col:{})".format( - self.msg, foundstr, self.loc, self.lineno, self.column - ) - - def __repr__(self): - return str(self) - - def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str: - """ - Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - markerString = marker_string if marker_string is not None else markerString - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( - (line_str[:line_column], markerString, line_str[line_column:]) - ) - return line_str.strip() - - def explain(self, depth=16) -> str: - """ - Method to translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Example:: - - expr = pp.Word(pp.nums) * 3 - try: - expr.parse_string("123 456 A789") - except pp.ParseException as pe: - print(pe.explain(depth=0)) - - prints:: - - 123 456 A789 - ^ - ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `set_name` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - Note: pyparsing's default truncation of exception tracebacks may also truncate the - stack of expressions that are displayed in the ``explain`` output. 
To get the full listing - of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` - """ - return self.explain_exception(self, depth) - - markInputline = mark_input_line - - -class ParseException(ParseBaseException): - """ - Exception thrown when a parse expression doesn't match the input string - - Example:: - - try: - Word(nums).set_name("integer").parse_string("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.column)) - - prints:: - - Expected integer (at char 0), (line:1, col:1) - column: 1 - - """ - - -class ParseFatalException(ParseBaseException): - """ - User-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately - """ - - -class ParseSyntaxException(ParseFatalException): - """ - Just like :class:`ParseFatalException`, but thrown internally - when an :class:`ErrorStop` ('-' operator) indicates - that parsing is to stop immediately because an unbacktrackable - syntax error has been found. - """ - - -class RecursiveGrammarException(Exception): - """ - Exception thrown by :class:`ParserElement.validate` if the - grammar could be left-recursive; parser may need to enable - left recursion using :class:`ParserElement.enable_left_recursion` - """ - - def __init__(self, parseElementList): - self.parseElementTrace = parseElementList - - def __str__(self) -> str: - return "RecursiveGrammarException: {}".format(self.parseElementTrace) diff --git a/lib/pkg_resources/_vendor/pyparsing/helpers.py b/lib/pkg_resources/_vendor/pyparsing/helpers.py deleted file mode 100644 index 9588b3b7..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/helpers.py +++ /dev/null @@ -1,1088 +0,0 @@ -# helpers.py -import html.entities -import re -import typing - -from . import __diag__ -from .core import * -from .util import _bslash, _flatten, _escape_regex_range_chars - - -# -# global helpers -# -def delimited_list( - expr: Union[str, ParserElement], - delim: Union[str, ParserElement] = ",", - combine: bool = False, - min: typing.Optional[int] = None, - max: typing.Optional[int] = None, - *, - allow_trailing_delim: bool = False, -) -> ParserElement: - """Helper to define a delimited list of expressions - the delimiter - defaults to ','. By default, the list elements and delimiters can - have intervening whitespace, and comments, but this can be - overridden by passing ``combine=True`` in the constructor. If - ``combine`` is set to ``True``, the matching tokens are - returned as a single token string, with the delimiters included; - otherwise, the matching tokens are returned as a list of tokens, - with the delimiters suppressed. - - If ``allow_trailing_delim`` is set to True, then the list may end with - a delimiter. 
- - Example:: - - delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - if isinstance(expr, str_type): - expr = ParserElement._literalStringClass(expr) - - dlName = "{expr} [{delim} {expr}]...{end}".format( - expr=str(expr.copy().streamline()), - delim=str(delim), - end=" [{}]".format(str(delim)) if allow_trailing_delim else "", - ) - - if not combine: - delim = Suppress(delim) - - if min is not None: - if min < 1: - raise ValueError("min must be greater than 0") - min -= 1 - if max is not None: - if min is not None and max <= min: - raise ValueError("max must be greater than, or equal to min") - max -= 1 - delimited_list_expr = expr + (delim + expr)[min, max] - - if allow_trailing_delim: - delimited_list_expr += Opt(delim) - - if combine: - return Combine(delimited_list_expr).set_name(dlName) - else: - return delimited_list_expr.set_name(dlName) - - -def counted_array( - expr: ParserElement, - int_expr: typing.Optional[ParserElement] = None, - *, - intExpr: typing.Optional[ParserElement] = None, -) -> ParserElement: - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``int_expr`` is specified, it should be a pyparsing expression - that produces an integer value. - - Example:: - - counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) - counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] - - # if other fields must be parsed after the count but before the - # list items, give the fields results names and they will - # be preserved in the returned ParseResults: - count_with_metadata = integer + Word(alphas)("type") - typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") - result = typed_array.parse_string("3 bool True True False") - print(result.dump()) - - # prints - # ['True', 'True', 'False'] - # - items: ['True', 'True', 'False'] - # - type: 'bool' - """ - intExpr = intExpr or int_expr - array_expr = Forward() - - def count_field_parse_action(s, l, t): - nonlocal array_expr - n = t[0] - array_expr <<= (expr * n) if n else Empty() - # clear list contents, but keep any named results - del t[:] - - if intExpr is None: - intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.set_name("arrayLen") - intExpr.add_parse_action(count_field_parse_action, call_during_try=True) - return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") - - -def match_previous_literal(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_literal(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, will also match the leading - ``"1:1"`` in ``"1:10"``. 
If this is not desired, use - :class:`match_previous_expr`. Do *not* use with packrat parsing - enabled. - """ - rep = Forward() - - def copy_token_to_repeater(s, l, t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.as_list()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def match_previous_expr(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_expr(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - - def copy_token_to_repeater(s, l, t): - matchTokens = _flatten(t.as_list()) - - def must_match_these_tokens(s, l, t): - theseTokens = _flatten(t.as_list()) - if theseTokens != matchTokens: - raise ParseException( - s, l, "Expected {}, found{}".format(matchTokens, theseTokens) - ) - - rep.set_parse_action(must_match_these_tokens, callDuringTry=True) - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def one_of( - strs: Union[typing.Iterable[str], str], - caseless: bool = False, - use_regex: bool = True, - as_keyword: bool = False, - *, - useRegex: bool = True, - asKeyword: bool = False, -) -> ParserElement: - """Helper to quickly define a set of alternative :class:`Literal` s, - and makes sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. 
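# Usage sketch (assumption: a standalone pyparsing 3.x install imported as `pp`,
# not this vendored copy). It illustrates the longest-first reordering described
# above: "<=" is tried before "<" even though it is listed later.
import pyparsing as pp

op = pp.one_of("< > = <= >= !=")
print(op.parse_string("<="))  # -> ['<='], not ['<'] with a dangling '='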
- - Parameters: - - - ``strs`` - a string of space-delimited literals, or a collection of - string literals - - ``caseless`` - treat all literals as caseless - (default= ``False``) - - ``use_regex`` - as an optimization, will - generate a :class:`Regex` object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if - creating a :class:`Regex` raises an exception) - (default= ``True``) - - ``as_keyword`` - enforce :class:`Keyword`-style matching on the - generated expressions - (default= ``False``) - - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, - but will be removed in a future release - - Example:: - - comp_oper = one_of("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - asKeyword = asKeyword or as_keyword - useRegex = useRegex and use_regex - - if ( - isinstance(caseless, str_type) - and __diag__.warn_on_multiple_string_args_to_oneof - ): - warnings.warn( - "More than one string argument passed to one_of, pass" - " choices as a list or space-delimited string", - stacklevel=2, - ) - - if caseless: - isequal = lambda a, b: a.upper() == b.upper() - masks = lambda a, b: b.upper().startswith(a.upper()) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = lambda a, b: a == b - masks = lambda a, b: b.startswith(a) - parseElementClass = Keyword if asKeyword else Literal - - symbols: List[str] = [] - if isinstance(strs, str_type): - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - raise TypeError("Invalid argument to one_of, expected string or iterable") - if not symbols: - return NoMatch() - - # reorder given symbols to take care to avoid masking longer choices with shorter ones - # (but only if the given symbols are not just single characters) - if any(len(sym) > 1 for sym in symbols): - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1 :]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if useRegex: - re_flags: int = re.IGNORECASE if caseless else 0 - - try: - if all(len(sym) == 1 for sym in symbols): - # symbols are just single characters, create range regex pattern - patt = "[{}]".format( - "".join(_escape_regex_range_chars(sym) for sym in symbols) - ) - else: - patt = "|".join(re.escape(sym) for sym in symbols) - - # wrap with \b word break markers if defining as keywords - if asKeyword: - patt = r"\b(?:{})\b".format(patt) - - ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) - - if caseless: - # add parse action to return symbols as specified, not in random - # casing as found in input string - symbol_map = {sym.lower(): sym for sym in symbols} - ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) - - return ret - - except re.error: - warnings.warn( - "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 - ) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( - " | ".join(symbols) - ) - - -def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: - """Helper to easily and clearly define a dictionary by specifying 
- the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. - - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - print(attr_expr[1, ...].parse_string(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) - - # similar to Dict, but simpler call format - result = dict_of(attr_label, attr_value).parse_string(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.as_dict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - - -def original_text_for( - expr: ParserElement, as_string: bool = True, *, asString: bool = True -) -> ParserElement: - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns astring containing the original parsed text. - - If the optional ``as_string`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`original_text_for` contains expressions with defined - results names, you must set ``as_string`` to ``False`` if you - want to preserve those results name values. - - The ``asString`` pre-PEP8 argument is retained for compatibility, - but will be removed in a future release. - - Example:: - - src = "this is test bold text normal text " - for tag in ("b", "i"): - opener, closer = make_html_tags(tag) - patt = original_text_for(opener + SkipTo(closer) + closer) - print(patt.search_string(src)[0]) - - prints:: - - [' bold text '] - ['text'] - """ - asString = asString and as_string - - locMarker = Empty().set_parse_action(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start : t._original_end] - else: - - def extractText(s, l, t): - t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] - - matchExpr.set_parse_action(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) - return matchExpr - - -def ungroup(expr: ParserElement) -> ParserElement: - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. 
- """ - return TokenConverter(expr).add_parse_action(lambda t: t[0]) - - -def locatedExpr(expr: ParserElement) -> ParserElement: - """ - (DEPRECATED - future code should use the Located class) - Helper to decorate a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parseWithTabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().set_parse_action(lambda ss, ll, tt: ll) - return Group( - locator("locn_start") - + expr("value") - + locator.copy().leaveWhitespace()("locn_end") - ) - - -def nested_expr( - opener: Union[str, ParserElement] = "(", - closer: Union[str, ParserElement] = ")", - content: typing.Optional[ParserElement] = None, - ignore_expr: ParserElement = quoted_string(), - *, - ignoreExpr: ParserElement = quoted_string(), -) -> ParserElement: - """Helper method for defining nested lists enclosed in opening and - closing delimiters (``"("`` and ``")"`` are the default). - - Parameters: - - ``opener`` - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - ``closer`` - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - ``content`` - expression for items within the nested lists - (default= ``None``) - - ``ignore_expr`` - expression for ignoring opening and closing delimiters - (default= :class:`quoted_string`) - - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility - but will be removed in a future release - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignore_expr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quoted_string or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. The default is - :class:`quoted_string`, but if no expressions are to be ignored, then - pass ``None`` for this argument. 
- - Example:: - - data_type = one_of("void int short long char float double") - decl_data_type = Combine(data_type + Opt(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Opt(delimited_list(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(c_style_comment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.search_string(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if ignoreExpr != ignore_expr: - ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, str_type) and isinstance(closer, str_type): - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS, - exact=1, - ) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = empty.copy() + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS - ).set_parse_action(lambda t: t[0].strip()) - else: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = Combine( - OneOrMore( - ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - raise ValueError( - "opening and closing arguments must be strings if no content expression is given" - ) - ret = Forward() - if ignoreExpr is not None: - ret <<= Group( - Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) - ) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.set_name("nested %s%s expression" % (opener, closer)) - return ret - - -def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr, str_type): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas, alphanums + "_-:") - if xml: - tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) - + Opt("/", default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - else: - tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( - printables, exclude_chars=">" - ) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict( - ZeroOrMore( - Group( - tagAttrName.set_parse_action(lambda t: t[0].lower()) - + Opt(Suppress("=") + tagAttrValue) - ) - ) - ) - + Opt("/", 
default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - closeTag = Combine(Literal("", adjacent=False) - - openTag.set_name("<%s>" % resname) - # add start results name in parse action now that ungrouped names are not reported at two levels - openTag.add_parse_action( - lambda t: t.__setitem__( - "start" + "".join(resname.replace(":", " ").title().split()), t.copy() - ) - ) - closeTag = closeTag( - "end" + "".join(resname.replace(":", " ").title().split()) - ).set_name("" % resname) - openTag.tag = resname - closeTag.tag = resname - openTag.tag_body = SkipTo(closeTag()) - return openTag, closeTag - - -def make_html_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. - - Example:: - - text = 'More info at the pyparsing wiki page' - # make_html_tags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a, a_end = make_html_tags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.search_string(text): - # attributes in the tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags(tag_str, False) - - -def make_xml_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`make_html_tags` - """ - return _makeTags(tag_str, True) - - -any_open_tag: ParserElement -any_close_tag: ParserElement -any_open_tag, any_close_tag = make_html_tags( - Word(alphas, alphanums + "_:").set_name("any tag") -) - -_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} -common_html_entity = Regex("&(?P" + "|".join(_htmlEntityMap) + ");").set_name( - "common HTML entity" -) - - -def replace_html_entity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - - -class OpAssoc(Enum): - LEFT = 1 - RIGHT = 2 - - -InfixNotationOperatorArgType = Union[ - ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] -] -InfixNotationOperatorSpec = Union[ - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - typing.Optional[ParseAction], - ], - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - ], -] - - -def infix_notation( - base_expr: ParserElement, - op_list: List[InfixNotationOperatorSpec], - lpar: Union[str, ParserElement] = Suppress("("), - rpar: Union[str, ParserElement] = Suppress(")"), -) -> ParserElement: - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions. The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below). - - Note: if you define a deep operator list, you may see performance - issues when using infix_notation. See - :class:`ParserElement.enable_packrat` for a mechanism to potentially - improve your parser performance. 
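# Usage sketch (assumption: a standalone pyparsing 3.x install imported as `pp`).
# Packrat memoization is opted into before building the operator grammar, as the
# note above suggests for deep operator lists.
import pyparsing as pp

pp.ParserElement.enable_packrat()  # global memoization switch

integer = pp.pyparsing_common.signed_integer
arith = pp.infix_notation(integer, [
    (pp.one_of("* /"), 2, pp.OpAssoc.LEFT),
    (pp.one_of("+ -"), 2, pp.OpAssoc.LEFT),
])
print(arith.parse_string("1+2*3"))  # -> [[1, '+', [2, '*', 3]]]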
- - Parameters: - - ``base_expr`` - expression representing the most basic operand to - be used in the expression - - ``op_list`` - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(op_expr, - num_operands, right_left_assoc, (optional)parse_action)``, where: - - - ``op_expr`` is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if ``num_operands`` - is 3, ``op_expr`` is a tuple of two expressions, for the two - operators separating the 3 terms - - ``num_operands`` is the number of terms for this operator (must be 1, - 2, or 3) - - ``right_left_assoc`` is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. - - ``parse_action`` is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``set_parse_action(*fn)`` - (:class:`ParserElement.set_parse_action`) - - ``lpar`` - expression for matching left-parentheses; if passed as a - str, then will be parsed as Suppress(lpar). If lpar is passed as - an expression (such as ``Literal('(')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress('(')``) - - ``rpar`` - expression for matching right-parentheses; if passed as a - str, then will be parsed as Suppress(rpar). If rpar is passed as - an expression (such as ``Literal(')')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infix_notation(integer | varname, - [ - ('-', 1, OpAssoc.RIGHT), - (one_of('* /'), 2, OpAssoc.LEFT), - (one_of('+ -'), 2, OpAssoc.LEFT), - ]) - - arith_expr.run_tests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', full_dump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.try_parse(instring, loc) - return loc, [] - - _FB.__name__ = "FollowedBy>" - - ret = Forward() - if isinstance(lpar, str): - lpar = Suppress(lpar) - if isinstance(rpar, str): - rpar = Suppress(rpar) - - # if lpar and rpar are not suppressed, wrap in group - if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)): - lastExpr = base_expr | Group(lpar + ret + rpar) - else: - lastExpr = base_expr | (lpar + ret + rpar) - - for i, operDef in enumerate(op_list): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] - if isinstance(opExpr, str_type): - opExpr = ParserElement._literalStringClass(opExpr) - if arity == 3: - if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions" - ) - opExpr1, opExpr2 = opExpr - term_name = "{}{} term".format(opExpr1, opExpr2) - else: - term_name = "{} term".format(opExpr) - - if not 1 <= arity <= 3: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - - if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): - raise 
ValueError("operator must indicate right or left associativity") - - thisExpr: Forward = Forward().set_name(term_name) - if rightLeftAssoc is OpAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( - lastExpr + (opExpr + lastExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr - ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) - elif rightLeftAssoc is OpAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Opt): - opExpr = Opt(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( - lastExpr + (opExpr + thisExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group( - lastExpr + thisExpr[1, ...] - ) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr - ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.set_parse_action(*pa) - else: - matchExpr.set_parse_action(pa) - thisExpr <<= (matchExpr | lastExpr).setName(term_name) - lastExpr = thisExpr - ret <<= lastExpr - return ret - - -def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): - """ - (DEPRECATED - use IndentedBlock class instead) - Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - ``blockStatementExpr`` - expression defining syntax of statement that - is repeated within the indented block - - ``indentStack`` - list created by caller to manage indentation stack - (multiple ``statementWithIndentedBlock`` expressions within a single - grammar should share a common ``indentStack``) - - ``indent`` - boolean indicating whether block must be indented beyond - the current level; set to ``False`` for block of left-most statements - (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - (Note that indentedBlock uses internal parse actions which make it - incompatible with packrat parsing.) - - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = stmt[1, ...] 
- - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stacks.append(indentStack[:]) - - def reset_stack(): - indentStack[:] = backup_stacks[-1] - - def checkPeerIndent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if not (indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) - INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") - PEER = Empty().set_parse_action(checkPeerIndent).set_name("") - UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") - if indent: - smExpr = Group( - Opt(NL) - + INDENT - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + UNDENT - ) - else: - smExpr = Group( - Opt(NL) - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + Opt(UNDENT) - ) - - # add a parse action to remove backup_stack from list of backups - smExpr.add_parse_action( - lambda: backup_stacks.pop(-1) and None if backup_stacks else None - ) - smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.set_name("indented block") - - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name( - "C style comment" -) -"Comment of the form ``/* ... */``" - -html_comment = Regex(r"").set_name("HTML comment") -"Comment of the form ````" - -rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") -dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") -"Comment of the form ``// ... (to end of line)``" - -cpp_style_comment = Combine( - Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment -).set_name("C++ style comment") -"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" - -java_style_comment = cpp_style_comment -"Same as :class:`cpp_style_comment`" - -python_style_comment = Regex(r"#.*").set_name("Python style comment") -"Comment of the form ``# ... 
(to end of line)``" - - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - - -# pre-PEP8 compatible names -delimitedList = delimited_list -countedArray = counted_array -matchPreviousLiteral = match_previous_literal -matchPreviousExpr = match_previous_expr -oneOf = one_of -dictOf = dict_of -originalTextFor = original_text_for -nestedExpr = nested_expr -makeHTMLTags = make_html_tags -makeXMLTags = make_xml_tags -anyOpenTag, anyCloseTag = any_open_tag, any_close_tag -commonHTMLEntity = common_html_entity -replaceHTMLEntity = replace_html_entity -opAssoc = OpAssoc -infixNotation = infix_notation -cStyleComment = c_style_comment -htmlComment = html_comment -restOfLine = rest_of_line -dblSlashComment = dbl_slash_comment -cppStyleComment = cpp_style_comment -javaStyleComment = java_style_comment -pythonStyleComment = python_style_comment diff --git a/lib/pkg_resources/_vendor/pyparsing/results.py b/lib/pkg_resources/_vendor/pyparsing/results.py deleted file mode 100644 index 00c9421d..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/results.py +++ /dev/null @@ -1,760 +0,0 @@ -# results.py -from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator -import pprint -from weakref import ref as wkref -from typing import Tuple, Any - -str_type: Tuple[type, ...] = (str, bytes) -_generator_type = type((_ for _ in ())) - - -class _ParseResultsWithOffset: - __slots__ = ["tup"] - - def __init__(self, p1, p2): - self.tup = (p1, p2) - - def __getitem__(self, i): - return self.tup[i] - - def __getstate__(self): - return self.tup - - def __setstate__(self, *args): - self.tup = args[0] - - -class ParseResults: - """Structured parse results, to provide multiple means of access to - the parsed data: - - - as a list (``len(results)``) - - by list index (``results[0], results[1]``, etc.) - - by attribute (``results.`` - see :class:`ParserElement.set_results_name`) - - Example:: - - integer = Word(nums) - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - # equivalent form: - # date_str = (integer("year") + '/' - # + integer("month") + '/' - # + integer("day")) - - # parse_string returns a ParseResults object - result = date_str.parse_string("1999/12/31") - - def test(s, fn=repr): - print("{} -> {}".format(s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' - """ - - _null_values: Tuple[Any, ...] 
= (None, [], "", ()) - - __slots__ = [ - "_name", - "_parent", - "_all_names", - "_modal", - "_toklist", - "_tokdict", - "__weakref__", - ] - - class List(list): - """ - Simple wrapper class to distinguish parsed list results that should be preserved - as actual Python lists, instead of being converted to :class:`ParseResults`: - - LBRACK, RBRACK = map(pp.Suppress, "[]") - element = pp.Forward() - item = ppc.integer - element_list = LBRACK + pp.delimited_list(element) + RBRACK - - # add parse actions to convert from ParseResults to actual Python collection types - def as_python_list(t): - return pp.ParseResults.List(t.as_list()) - element_list.add_parse_action(as_python_list) - - element <<= item | element_list - - element.run_tests(''' - 100 - [2,3,4] - [[2, 1],3,4] - [(2, 1),3,4] - (2,3,4) - ''', post_parse=lambda s, r: (r[0], type(r[0]))) - - prints: - - 100 - (100, ) - - [2,3,4] - ([2, 3, 4], ) - - [[2, 1],3,4] - ([[2, 1], 3, 4], ) - - (Used internally by :class:`Group` when `aslist=True`.) - """ - - def __new__(cls, contained=None): - if contained is None: - contained = [] - - if not isinstance(contained, list): - raise TypeError( - "{} may only be constructed with a list," - " not {}".format(cls.__name__, type(contained).__name__) - ) - - return list.__new__(cls) - - def __new__(cls, toklist=None, name=None, **kwargs): - if isinstance(toklist, ParseResults): - return toklist - self = object.__new__(cls) - self._name = None - self._parent = None - self._all_names = set() - - if toklist is None: - self._toklist = [] - elif isinstance(toklist, (list, _generator_type)): - self._toklist = ( - [toklist[:]] - if isinstance(toklist, ParseResults.List) - else list(toklist) - ) - else: - self._toklist = [toklist] - self._tokdict = dict() - return self - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance - ): - self._modal = modal - if name is not None and name != "": - if isinstance(name, int): - name = str(name) - if not modal: - self._all_names = {name} - self._name = name - if toklist not in self._null_values: - if isinstance(toklist, (str_type, type)): - toklist = [toklist] - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset( - ParseResults(toklist._toklist), 0 - ) - else: - self[name] = _ParseResultsWithOffset( - ParseResults(toklist[0]), 0 - ) - self[name]._name = name - else: - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - if toklist is not self: - self[name] = toklist - else: - self._name = name - - def __getitem__(self, i): - if isinstance(i, (int, slice)): - return self._toklist[i] - else: - if i not in self._all_names: - return self._tokdict[i][-1][0] - else: - return ParseResults([v[0] for v in self._tokdict[i]]) - - def __setitem__(self, k, v, isinstance=isinstance): - if isinstance(v, _ParseResultsWithOffset): - self._tokdict[k] = self._tokdict.get(k, list()) + [v] - sub = v[0] - elif isinstance(k, (int, slice)): - self._toklist[k] = v - sub = v - else: - self._tokdict[k] = self._tokdict.get(k, list()) + [ - _ParseResultsWithOffset(v, 0) - ] - sub = v - if isinstance(sub, ParseResults): - sub._parent = wkref(self) - - def __delitem__(self, i): - if isinstance(i, (int, slice)): - mylen = len(self._toklist) - del self._toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices - 
removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position - (position > j) - ) - else: - del self._tokdict[i] - - def __contains__(self, k) -> bool: - return k in self._tokdict - - def __len__(self) -> int: - return len(self._toklist) - - def __bool__(self) -> bool: - return not not (self._toklist or self._tokdict) - - def __iter__(self) -> Iterator: - return iter(self._toklist) - - def __reversed__(self) -> Iterator: - return iter(self._toklist[::-1]) - - def keys(self): - return iter(self._tokdict) - - def values(self): - return (self[k] for k in self.keys()) - - def items(self): - return ((k, self[k]) for k in self.keys()) - - def haskeys(self) -> bool: - """ - Since ``keys()`` returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self._tokdict) - - def pop(self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - def remove_first(tokens): - tokens.pop(0) - numlist.add_parse_action(remove_first) - print(numlist.parse_string("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + Word(nums)[1, ...] - print(patt.parse_string("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.add_parse_action(remove_LABEL) - print(patt.parse_string("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: 'AAB' - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k, v in kwargs.items(): - if k == "default": - args = (args[0], v) - else: - raise TypeError( - "pop() got an unexpected keyword argument {!r}".format(k) - ) - if isinstance(args[0], int) or len(args) == 1 or args[0] in self: - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, default_value=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``default_value`` or ``None`` if no - ``default_value`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return default_value - - def insert(self, index, ins_string): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. 
- - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - numlist.add_parse_action(insert_locn) - print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] - """ - self._toklist.insert(index, ins_string) - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position + (position > index) - ) - - def append(self, item): - """ - Add single element to end of ``ParseResults`` list of elements. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - numlist.add_parse_action(append_sum) - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] - """ - self._toklist.append(item) - - def extend(self, itemseq): - """ - Add sequence of elements to end of ``ParseResults`` list of elements. - - Example:: - - patt = Word(alphas)[1, ...] - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - patt.add_parse_action(make_palindrome) - print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self.__iadd__(itemseq) - else: - self._toklist.extend(itemseq) - - def clear(self): - """ - Clear all elements and results names. - """ - del self._toklist[:] - self._tokdict.clear() - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - if name.startswith("__"): - raise AttributeError(name) - return "" - - def __add__(self, other) -> "ParseResults": - ret = self.copy() - ret += other - return ret - - def __iadd__(self, other) -> "ParseResults": - if other._tokdict: - offset = len(self._toklist) - addoffset = lambda a: offset if a < 0 else a + offset - otheritems = other._tokdict.items() - otherdictitems = [ - (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) - for k, vlist in otheritems - for v in vlist - ] - for k, v in otherdictitems: - self[k] = v - if isinstance(v[0], ParseResults): - v[0]._parent = wkref(self) - - self._toklist += other._toklist - self._all_names |= other._all_names - return self - - def __radd__(self, other) -> "ParseResults": - if isinstance(other, int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__(self) -> str: - return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict()) - - def __str__(self) -> str: - return ( - "[" - + ", ".join( - [ - str(i) if isinstance(i, ParseResults) else repr(i) - for i in self._toklist - ] - ) - + "]" - ) - - def _asStringList(self, sep=""): - out = [] - for item in self._toklist: - if out and sep: - out.append(sep) - if isinstance(item, ParseResults): - out += item._asStringList() - else: - out.append(str(item)) - return out - - def as_list(self) -> list: - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. 
- - Example:: - - patt = Word(alphas)[1, ...] - result = patt.parse_string("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] - - # Use as_list() to create an actual list - result_list = result.as_list() - print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [ - res.as_list() if isinstance(res, ParseResults) else res - for res in self._toklist - ] - - def as_dict(self) -> dict: - """ - Returns the named parse results as a nested dictionary. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('12/31/1999') - print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.as_dict() - print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable - print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - - def to_item(obj): - if isinstance(obj, ParseResults): - return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] - else: - return obj - - return dict((k, to_item(v)) for k, v in self.items()) - - def copy(self) -> "ParseResults": - """ - Returns a new copy of a :class:`ParseResults` object. - """ - ret = ParseResults(self._toklist) - ret._tokdict = self._tokdict.copy() - ret._parent = self._parent - ret._all_names |= self._all_names - ret._name = self._name - return ret - - def get_name(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. - - Example:: - - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = user_data[1, ...] - - result = user_info.parse_string("22 111-22-3333 #221B") - for item in result: - print(item.get_name(), ':', item[0]) - - prints:: - - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self._name: - return self._name - elif self._parent: - par = self._parent() - - def find_in_parent(sub): - return next( - ( - k - for k, vlist in par._tokdict.items() - for v, loc in vlist - if sub is v - ), - None, - ) - - return find_in_parent(self) if par else None - elif ( - len(self) == 1 - and len(self._tokdict) == 1 - and next(iter(self._tokdict.values()))[0][1] in (0, -1) - ): - return next(iter(self._tokdict.keys())) - else: - return None - - def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: - """ - Diagnostic method for listing out the contents of - a :class:`ParseResults`. Accepts an optional ``indent`` argument so - that this string can be embedded in a nested display of other data. 
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('1999/12/31') - print(result.dump()) - - prints:: - - ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' - """ - out = [] - NL = "\n" - out.append(indent + str(self.as_list()) if include_list else "") - - if full: - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append("{}{}- {}: ".format(indent, (" " * _depth), k)) - if isinstance(v, ParseResults): - if v: - out.append( - v.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ) - ) - else: - out.append(str(v)) - else: - out.append(repr(v)) - if any(isinstance(vv, ParseResults) for vv in self): - v = self - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - out.append( - "\n{}{}[{}]:\n{}{}{}".format( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - vv.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ), - ) - ) - else: - out.append( - "\n%s%s[%d]:\n%s%s%s" - % ( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - str(vv), - ) - ) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the - `pprint `_ module. - Accepts additional positional or keyword args as defined for - `pprint.pprint `_ . - - Example:: - - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimited_list(term))) - result = func.parse_string("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - - prints:: - - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.as_list(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return ( - self._toklist, - ( - self._tokdict.copy(), - self._parent is not None and self._parent() or None, - self._all_names, - self._name, - ), - ) - - def __setstate__(self, state): - self._toklist, (self._tokdict, par, inAccumNames, self._name) = state - self._all_names = set(inAccumNames) - if par is not None: - self._parent = wkref(par) - else: - self._parent = None - - def __getnewargs__(self): - return self._toklist, self._name - - def __dir__(self): - return dir(type(self)) + list(self.keys()) - - @classmethod - def from_dict(cls, other, name=None) -> "ParseResults": - """ - Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the - name-value relations as results names. If an optional ``name`` argument is - given, a nested ``ParseResults`` will be returned. 
- """ - - def is_iterable(obj): - try: - iter(obj) - except Exception: - return False - else: - return not isinstance(obj, str_type) - - ret = cls([]) - for k, v in other.items(): - if isinstance(v, Mapping): - ret += cls.from_dict(v, name=k) - else: - ret += cls([v], name=k, asList=is_iterable(v)) - if name is not None: - ret = cls([ret], name=name) - return ret - - asList = as_list - asDict = as_dict - getName = get_name - - -MutableMapping.register(ParseResults) -MutableSequence.register(ParseResults) diff --git a/lib/pkg_resources/_vendor/pyparsing/testing.py b/lib/pkg_resources/_vendor/pyparsing/testing.py deleted file mode 100644 index 84a0ef17..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/testing.py +++ /dev/null @@ -1,331 +0,0 @@ -# testing.py - -from contextlib import contextmanager -import typing - -from .core import ( - ParserElement, - ParseException, - Keyword, - __diag__, - __compat__, -) - - -class pyparsing_test: - """ - namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - bounded recursion parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example:: - - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] + ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass - - self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace - - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - if ParserElement._packratEnabled: - self._save_context[ - "packrat_cache_size" - ] = ParserElement.packrat_cache.size - else: - self._save_context["packrat_cache_size"] = None - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context[ - "recursion_enabled" - ] = ParserElement._left_recursion_enabled - - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.set_default_whitespace_chars( - self._save_context["default_whitespace"] - ) - - ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] - - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - - for name, value in self._save_context["__diag__"].items(): - (__diag__.enable if value else __diag__.disable)(name) - - ParserElement._packratEnabled = False - if self._save_context["packrat_enabled"]: - 
ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) - else: - ParserElement._parse = self._save_context["packrat_parse"] - ParserElement._left_recursion_enabled = self._save_context[ - "recursion_enabled" - ] - - __compat__.collect_all_And_tokens = self._save_context["__compat__"] - - return self - - def copy(self): - ret = type(self)() - ret._save_context.update(self._save_context) - return ret - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, - and compare any defined results names with an optional ``expected_dict``. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.as_list(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.as_dict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. - """ - result = expr.parse_string(test_string, parse_all=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. - """ - result = expr.parse_string(test_string, parseAll=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of - list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped - with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. - Finally, asserts that the overall ``runTests()`` success value is ``True``. 
- - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (*rpt, expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? - print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - @staticmethod - def with_line_numbers( - s: str, - start_line: typing.Optional[int] = None, - end_line: typing.Optional[int] = None, - expand_tabs: bool = True, - eol_mark: str = "|", - mark_spaces: typing.Optional[str] = None, - mark_control: typing.Optional[str] = None, - ) -> str: - """ - Helpful method for debugging a parser - prints a string with line and column numbers. - (Line and column numbers are 1-based.) 
- - :param s: tuple(bool, str - string to be printed with line and column numbers - :param start_line: int - (optional) starting line number in s to print (default=1) - :param end_line: int - (optional) ending line number in s to print (default=len(s)) - :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default - :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") - :param mark_spaces: str - (optional) special character to display in place of spaces - :param mark_control: str - (optional) convert non-printing control characters to a placeholding - character; valid values: - - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" - - any single character string - replace control characters with given string - - None (default) - string is displayed as-is - - :return: str - input string with leading line numbers and column number headers - """ - if expand_tabs: - s = s.expandtabs() - if mark_control is not None: - if mark_control == "unicode": - tbl = str.maketrans( - {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))} - | {127: 0x2421} - ) - eol_mark = "" - else: - tbl = str.maketrans( - {c: mark_control for c in list(range(0, 32)) + [127]} - ) - s = s.translate(tbl) - if mark_spaces is not None and mark_spaces != " ": - if mark_spaces == "unicode": - tbl = str.maketrans({9: 0x2409, 32: 0x2423}) - s = s.translate(tbl) - else: - s = s.replace(" ", mark_spaces) - if start_line is None: - start_line = 1 - if end_line is None: - end_line = len(s) - end_line = min(end_line, len(s)) - start_line = min(max(1, start_line), end_line) - - if mark_control != "unicode": - s_lines = s.splitlines()[start_line - 1 : end_line] - else: - s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] - if not s_lines: - return "" - - lineno_width = len(str(end_line)) - max_line_len = max(len(line) for line in s_lines) - lead = " " * (lineno_width + 1) - if max_line_len >= 99: - header0 = ( - lead - + "".join( - "{}{}".format(" " * 99, (i + 1) % 100) - for i in range(max(max_line_len // 100, 1)) - ) - + "\n" - ) - else: - header0 = "" - header1 = ( - header0 - + lead - + "".join( - " {}".format((i + 1) % 10) - for i in range(-(-max_line_len // 10)) - ) - + "\n" - ) - header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" - return ( - header1 - + header2 - + "\n".join( - "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark) - for i, line in enumerate(s_lines, start=start_line) - ) - + "\n" - ) diff --git a/lib/pkg_resources/_vendor/pyparsing/unicode.py b/lib/pkg_resources/_vendor/pyparsing/unicode.py deleted file mode 100644 index 06526203..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/unicode.py +++ /dev/null @@ -1,352 +0,0 @@ -# unicode.py - -import sys -from itertools import filterfalse -from typing import List, Tuple, Union - - -class _lazyclassproperty: - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, "_intern") or any( - cls._intern is getattr(superclass, "_intern", []) - for superclass in cls.__mro__[1:] - ): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] - - -class unicode_set: - """ - A set of Unicode characters, for language-specific strings for 
- ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``. Ranges can be specified using - 2-tuples or a 1-tuple, such as:: - - _ranges = [ - (0x0020, 0x007e), - (0x00a0, 0x00ff), - (0x0100,), - ] - - Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - - _ranges: UnicodeRangeList = [] - - @_lazyclassproperty - def _chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in getattr(cc, "_ranges", ()): - ret.extend(range(rr[0], rr[-1] + 1)) - return [chr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return "".join(filter(str.isalpha, cls._chars_for_ranges)) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return "".join(filter(str.isdigit, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - @_lazyclassproperty - def identchars(cls): - "all characters in this range that are valid identifier characters, plus underscore '_'" - return "".join( - sorted( - set( - "".join(filter(str.isidentifier, cls._chars_for_ranges)) - + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" - + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" - + "_" - ) - ) - ) - - @_lazyclassproperty - def identbodychars(cls): - """ - all characters in this range that are valid identifier body characters, - plus the digits 0-9 - """ - return "".join( - sorted( - set( - cls.identchars - + "0123456789" - + "".join( - [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()] - ) - ) - ) - ) - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
- """ - - # fmt: off - - # define ranges in language character sets - _ranges: UnicodeRangeList = [ - (0x0020, sys.maxunicode), - ] - - class BasicMultilingualPlane(unicode_set): - "Unicode set for the Basic Multilingual Plane" - _ranges: UnicodeRangeList = [ - (0x0020, 0xFFFF), - ] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0020, 0x007E), - (0x00A0, 0x00FF), - ] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0100, 0x017F), - ] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0180, 0x024F), - ] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges: UnicodeRangeList = [ - (0x0342, 0x0345), - (0x0370, 0x0377), - (0x037A, 0x037F), - (0x0384, 0x038A), - (0x038C,), - (0x038E, 0x03A1), - (0x03A3, 0x03E1), - (0x03F0, 0x03FF), - (0x1D26, 0x1D2A), - (0x1D5E,), - (0x1D60,), - (0x1D66, 0x1D6A), - (0x1F00, 0x1F15), - (0x1F18, 0x1F1D), - (0x1F20, 0x1F45), - (0x1F48, 0x1F4D), - (0x1F50, 0x1F57), - (0x1F59,), - (0x1F5B,), - (0x1F5D,), - (0x1F5F, 0x1F7D), - (0x1F80, 0x1FB4), - (0x1FB6, 0x1FC4), - (0x1FC6, 0x1FD3), - (0x1FD6, 0x1FDB), - (0x1FDD, 0x1FEF), - (0x1FF2, 0x1FF4), - (0x1FF6, 0x1FFE), - (0x2129,), - (0x2719, 0x271A), - (0xAB65,), - (0x10140, 0x1018D), - (0x101A0,), - (0x1D200, 0x1D245), - (0x1F7A1, 0x1F7A7), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0400, 0x052F), - (0x1C80, 0x1C88), - (0x1D2B,), - (0x1D78,), - (0x2DE0, 0x2DFF), - (0xA640, 0xA672), - (0xA674, 0xA69F), - (0xFE2E, 0xFE2F), - ] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x2E80, 0x2E99), - (0x2E9B, 0x2EF3), - (0x31C0, 0x31E3), - (0x3400, 0x4DB5), - (0x4E00, 0x9FEF), - (0xA700, 0xA707), - (0xF900, 0xFA6D), - (0xFA70, 0xFAD9), - (0x16FE2, 0x16FE3), - (0x1F210, 0x1F212), - (0x1F214, 0x1F23B), - (0x1F240, 0x1F248), - (0x20000, 0x2A6D6), - (0x2A700, 0x2B734), - (0x2B740, 0x2B81D), - (0x2B820, 0x2CEA1), - (0x2CEB0, 0x2EBE0), - (0x2F800, 0x2FA1D), - ] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges: UnicodeRangeList = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x4E00, 0x9FBF), - (0x3000, 0x303F), - ] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3041, 0x3096), - (0x3099, 0x30A0), - (0x30FC,), - (0xFF70,), - (0x1B001,), - (0x1B150, 0x1B152), - (0x1F200,), - ] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3099, 0x309C), - (0x30A0, 0x30FF), - (0x31F0, 0x31FF), - (0x32D0, 0x32FE), - (0xFF65, 0xFF9F), - (0x1B000,), - (0x1B164, 0x1B167), - (0x1F201, 0x1F202), - (0x1F213,), - ] - - class Hangul(unicode_set): - "Unicode set for Hangul (Korean) Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x1100, 0x11FF), - (0x302E, 0x302F), - (0x3131, 0x318E), - (0x3200, 0x321C), - (0x3260, 0x327B), - (0x327E,), - (0xA960, 0xA97C), - (0xAC00, 0xD7A3), - (0xD7B0, 0xD7C6), - (0xD7CB, 0xD7FB), - (0xFFA0, 0xFFBE), - (0xFFC2, 0xFFC7), - (0xFFCA, 0xFFCF), - (0xFFD2, 0xFFD7), - (0xFFDA, 0xFFDC), - ] - - Korean = Hangul - - class 
CJK(Chinese, Japanese, Hangul): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0E01, 0x0E3A), - (0x0E3F, 0x0E5B) - ] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0600, 0x061B), - (0x061E, 0x06FF), - (0x0700, 0x077F), - ] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0591, 0x05C7), - (0x05D0, 0x05EA), - (0x05EF, 0x05F4), - (0xFB1D, 0xFB36), - (0xFB38, 0xFB3C), - (0xFB3E,), - (0xFB40, 0xFB41), - (0xFB43, 0xFB44), - (0xFB46, 0xFB4F), - ] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0900, 0x097F), - (0xA8E0, 0xA8FF) - ] - - # fmt: on - - -pyparsing_unicode.Japanese._ranges = ( - pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges -) - -pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane - -# add language identifiers using language Unicode -pyparsing_unicode.العربية = pyparsing_unicode.Arabic -pyparsing_unicode.中文 = pyparsing_unicode.Chinese -pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic -pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek -pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew -pyparsing_unicode.日本語 = pyparsing_unicode.Japanese -pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji -pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana -pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana -pyparsing_unicode.한국어 = pyparsing_unicode.Korean -pyparsing_unicode.ไทย = pyparsing_unicode.Thai -pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari diff --git a/lib/pkg_resources/_vendor/pyparsing/util.py b/lib/pkg_resources/_vendor/pyparsing/util.py deleted file mode 100644 index 34ce092c..00000000 --- a/lib/pkg_resources/_vendor/pyparsing/util.py +++ /dev/null @@ -1,235 +0,0 @@ -# util.py -import warnings -import types -import collections -import itertools -from functools import lru_cache -from typing import List, Union, Iterable - -_bslash = chr(92) - - -class __config_flags: - """Internal class for defining compatibility and debugging flags""" - - _all_names: List[str] = [] - _fixed_names: List[str] = [] - _type_desc = "configuration" - - @classmethod - def _set(cls, dname, value): - if dname in cls._fixed_names: - warnings.warn( - "{}.{} {} is {} and cannot be overridden".format( - cls.__name__, - dname, - cls._type_desc, - str(getattr(cls, dname)).upper(), - ) - ) - return - if dname in cls._all_names: - setattr(cls, dname, value) - else: - raise ValueError("no such {} {!r}".format(cls._type_desc, dname)) - - enable = classmethod(lambda cls, name: cls._set(name, True)) - disable = classmethod(lambda cls, name: cls._set(name, False)) - - -@lru_cache(maxsize=128) -def col(loc: int, strg: str) -> int: - """ - Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. 
See - :class:`ParserElement.parseString` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - """ - s = strg - return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc) - - -@lru_cache(maxsize=128) -def lineno(loc: int, strg: str) -> int: - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note - the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`ParserElement.parseString` - for more information on parsing strings containing ```` s, and - suggested methods to maintain a consistent view of the parsed string, the - parse location, and line and column positions within the parsed string. - """ - return strg.count("\n", 0, loc) + 1 - - -@lru_cache(maxsize=128) -def line(loc: int, strg: str) -> str: - """ - Returns the line of text containing loc within a string, counting newlines as line separators. - """ - last_cr = strg.rfind("\n", 0, loc) - next_cr = strg.find("\n", loc) - return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :] - - -class _UnboundedCache: - def __init__(self): - cache = {} - cache_get = cache.get - self.not_in_cache = not_in_cache = object() - - def get(_, key): - return cache_get(key, not_in_cache) - - def set_(_, key, value): - cache[key] = value - - def clear(_): - cache.clear() - - self.size = None - self.get = types.MethodType(get, self) - self.set = types.MethodType(set_, self) - self.clear = types.MethodType(clear, self) - - -class _FifoCache: - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - cache = collections.OrderedDict() - cache_get = cache.get - - def get(_, key): - return cache_get(key, not_in_cache) - - def set_(_, key, value): - cache[key] = value - while len(cache) > size: - cache.popitem(last=False) - - def clear(_): - cache.clear() - - self.size = size - self.get = types.MethodType(get, self) - self.set = types.MethodType(set_, self) - self.clear = types.MethodType(clear, self) - - -class LRUMemo: - """ - A memoizing mapping that retains `capacity` deleted items - - The memo tracks retained items by their access order; once `capacity` items - are retained, the least recently used item is discarded. 
- """ - - def __init__(self, capacity): - self._capacity = capacity - self._active = {} - self._memory = collections.OrderedDict() - - def __getitem__(self, key): - try: - return self._active[key] - except KeyError: - self._memory.move_to_end(key) - return self._memory[key] - - def __setitem__(self, key, value): - self._memory.pop(key, None) - self._active[key] = value - - def __delitem__(self, key): - try: - value = self._active.pop(key) - except KeyError: - pass - else: - while len(self._memory) >= self._capacity: - self._memory.popitem(last=False) - self._memory[key] = value - - def clear(self): - self._active.clear() - self._memory.clear() - - -class UnboundedMemo(dict): - """ - A memoizing mapping that retains all deleted items - """ - - def __delitem__(self, key): - pass - - -def _escape_regex_range_chars(s: str) -> str: - # escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return str(s) - - -def _collapse_string_to_ranges( - s: Union[str, Iterable[str]], re_escape: bool = True -) -> str: - def is_consecutive(c): - c_int = ord(c) - is_consecutive.prev, prev = c_int, is_consecutive.prev - if c_int - prev > 1: - is_consecutive.value = next(is_consecutive.counter) - return is_consecutive.value - - is_consecutive.prev = 0 - is_consecutive.counter = itertools.count() - is_consecutive.value = -1 - - def escape_re_range_char(c): - return "\\" + c if c in r"\^-][" else c - - def no_escape_re_range_char(c): - return c - - if not re_escape: - escape_re_range_char = no_escape_re_range_char - - ret = [] - s = "".join(sorted(set(s))) - if len(s) > 3: - for _, chars in itertools.groupby(s, key=is_consecutive): - first = last = next(chars) - last = collections.deque( - itertools.chain(iter([last]), chars), maxlen=1 - ).pop() - if first == last: - ret.append(escape_re_range_char(first)) - else: - sep = "" if ord(last) == ord(first) + 1 else "-" - ret.append( - "{}{}{}".format( - escape_re_range_char(first), sep, escape_re_range_char(last) - ) - ) - else: - ret = [escape_re_range_char(c) for c in s] - - return "".join(ret) - - -def _flatten(ll: list) -> list: - ret = [] - for i in ll: - if isinstance(i, list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret diff --git a/lib/pkg_resources/_vendor/typing_extensions.py b/lib/pkg_resources/_vendor/typing_extensions.py new file mode 100644 index 00000000..ef42417c --- /dev/null +++ b/lib/pkg_resources/_vendor/typing_extensions.py @@ -0,0 +1,2209 @@ +import abc +import collections +import collections.abc +import functools +import operator +import sys +import types as _types +import typing + + +__all__ = [ + # Super-special typing primitives. + 'Any', + 'ClassVar', + 'Concatenate', + 'Final', + 'LiteralString', + 'ParamSpec', + 'ParamSpecArgs', + 'ParamSpecKwargs', + 'Self', + 'Type', + 'TypeVar', + 'TypeVarTuple', + 'Unpack', + + # ABCs (from collections.abc). + 'Awaitable', + 'AsyncIterator', + 'AsyncIterable', + 'Coroutine', + 'AsyncGenerator', + 'AsyncContextManager', + 'ChainMap', + + # Concrete collection types. + 'ContextManager', + 'Counter', + 'Deque', + 'DefaultDict', + 'NamedTuple', + 'OrderedDict', + 'TypedDict', + + # Structural checks, a.k.a. protocols. + 'SupportsIndex', + + # One-off things. 
+ 'Annotated', + 'assert_never', + 'assert_type', + 'clear_overloads', + 'dataclass_transform', + 'get_overloads', + 'final', + 'get_args', + 'get_origin', + 'get_type_hints', + 'IntVar', + 'is_typeddict', + 'Literal', + 'NewType', + 'overload', + 'override', + 'Protocol', + 'reveal_type', + 'runtime', + 'runtime_checkable', + 'Text', + 'TypeAlias', + 'TypeGuard', + 'TYPE_CHECKING', + 'Never', + 'NoReturn', + 'Required', + 'NotRequired', +] + +# for backward compatibility +PEP_560 = True +GenericMeta = type + +# The functions below are modified copies of typing internal helpers. +# They are needed by _ProtocolMeta and they provide support for PEP 646. + +_marker = object() + + +def _check_generic(cls, parameters, elen=_marker): + """Check correct count for parameters of a generic cls (internal helper). + This gives a nice error message in case of count mismatch. + """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + if elen is _marker: + if not hasattr(cls, "__parameters__") or not cls.__parameters__: + raise TypeError(f"{cls} is not a generic class") + elen = len(cls.__parameters__) + alen = len(parameters) + if alen != elen: + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) + if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): + return + raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};" + f" actual {alen}, expected {elen}") + + +if sys.version_info >= (3, 10): + def _should_collect_from_parameters(t): + return isinstance( + t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType) + ) +elif sys.version_info >= (3, 9): + def _should_collect_from_parameters(t): + return isinstance(t, (typing._GenericAlias, _types.GenericAlias)) +else: + def _should_collect_from_parameters(t): + return isinstance(t, typing._GenericAlias) and not t._special + + +def _collect_type_vars(types, typevar_types=None): + """Collect all type variable contained in types in order of + first appearance (lexicographic order). For example:: + + _collect_type_vars((T, List[S, T])) == (T, S) + """ + if typevar_types is None: + typevar_types = typing.TypeVar + tvars = [] + for t in types: + if ( + isinstance(t, typevar_types) and + t not in tvars and + not _is_unpack(t) + ): + tvars.append(t) + if _should_collect_from_parameters(t): + tvars.extend([t for t in t.__parameters__ if t not in tvars]) + return tuple(tvars) + + +NoReturn = typing.NoReturn + +# Some unconstrained type variables. These are used by the container types. +# (These are not for export.) +T = typing.TypeVar('T') # Any type. +KT = typing.TypeVar('KT') # Key type. +VT = typing.TypeVar('VT') # Value type. +T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. +T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. + + +if sys.version_info >= (3, 11): + from typing import Any +else: + + class _AnyMeta(type): + def __instancecheck__(self, obj): + if self is Any: + raise TypeError("typing_extensions.Any cannot be used with isinstance()") + return super().__instancecheck__(obj) + + def __repr__(self): + if self is Any: + return "typing_extensions.Any" + return super().__repr__() + + class Any(metaclass=_AnyMeta): + """Special type indicating an unconstrained type. + - Any is compatible with every type. + - Any assumed to have all methods. + - All values assumed to be instances of Any. 
+ Note that all the above statements are true from the point of view of + static type checkers. At runtime, Any should not be used with instance + checks. + """ + def __new__(cls, *args, **kwargs): + if cls is Any: + raise TypeError("Any cannot be instantiated") + return super().__new__(cls, *args, **kwargs) + + +ClassVar = typing.ClassVar + +# On older versions of typing there is an internal class named "Final". +# 3.8+ +if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): + Final = typing.Final +# 3.7 +else: + class _FinalForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + Final = _FinalForm('Final', + doc="""A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties.""") + +if sys.version_info >= (3, 11): + final = typing.final +else: + # @final exists in 3.8+, but we backport it for all versions + # before 3.11 to keep support for the __final__ attribute. + # See https://bugs.python.org/issue46342 + def final(f): + """This decorator can be used to indicate to type checkers that + the decorated method cannot be overridden, and decorated class + cannot be subclassed. For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. The decorator + sets the ``__final__`` attribute to ``True`` on the decorated object + to allow runtime introspection. + """ + try: + f.__final__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return f + + +def IntVar(name): + return typing.TypeVar(name) + + +# 3.8+: +if hasattr(typing, 'Literal'): + Literal = typing.Literal +# 3.7: +else: + class _LiteralForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + return typing._GenericAlias(self, parameters) + + Literal = _LiteralForm('Literal', + doc="""A type that can be used to indicate to type checkers + that the corresponding value has a value literally equivalent + to the provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to + the value 4 and no other value. + + Literal[...] cannot be subclassed. 
There is no runtime + checking verifying that the parameter is actually a value + instead of a type.""") + + +_overload_dummy = typing._overload_dummy # noqa + + +if hasattr(typing, "get_overloads"): # 3.11+ + overload = typing.overload + get_overloads = typing.get_overloads + clear_overloads = typing.clear_overloads +else: + # {module: {qualname: {firstlineno: func}}} + _overload_registry = collections.defaultdict( + functools.partial(collections.defaultdict, dict) + ) + + def overload(func): + """Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + + The overloads for a function can be retrieved at runtime using the + get_overloads() function. + """ + # classmethod and staticmethod + f = getattr(func, "__func__", func) + try: + _overload_registry[f.__module__][f.__qualname__][ + f.__code__.co_firstlineno + ] = func + except AttributeError: + # Not a normal function; ignore. + pass + return _overload_dummy + + def get_overloads(func): + """Return all defined overloads for *func* as a sequence.""" + # classmethod and staticmethod + f = getattr(func, "__func__", func) + if f.__module__ not in _overload_registry: + return [] + mod_dict = _overload_registry[f.__module__] + if f.__qualname__ not in mod_dict: + return [] + return list(mod_dict[f.__qualname__].values()) + + def clear_overloads(): + """Clear all overloads in the registry.""" + _overload_registry.clear() + + +# This is not a real generic class. Don't use outside annotations. +Type = typing.Type + +# Various ABCs mimicking those in collections.abc. +# A few are simply re-exported for completeness. 
+ + +Awaitable = typing.Awaitable +Coroutine = typing.Coroutine +AsyncIterable = typing.AsyncIterable +AsyncIterator = typing.AsyncIterator +Deque = typing.Deque +ContextManager = typing.ContextManager +AsyncContextManager = typing.AsyncContextManager +DefaultDict = typing.DefaultDict + +# 3.7.2+ +if hasattr(typing, 'OrderedDict'): + OrderedDict = typing.OrderedDict +# 3.7.0-3.7.2 +else: + OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) + +Counter = typing.Counter +ChainMap = typing.ChainMap +AsyncGenerator = typing.AsyncGenerator +NewType = typing.NewType +Text = typing.Text +TYPE_CHECKING = typing.TYPE_CHECKING + + +_PROTO_WHITELIST = ['Callable', 'Awaitable', + 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', + 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', + 'ContextManager', 'AsyncContextManager'] + + +def _get_protocol_attrs(cls): + attrs = set() + for base in cls.__mro__[:-1]: # without object + if base.__name__ in ('Protocol', 'Generic'): + continue + annotations = getattr(base, '__annotations__', {}) + for attr in list(base.__dict__.keys()) + list(annotations.keys()): + if (not attr.startswith('_abc_') and attr not in ( + '__abstractmethods__', '__annotations__', '__weakref__', + '_is_protocol', '_is_runtime_protocol', '__dict__', + '__args__', '__slots__', + '__next_in_mro__', '__parameters__', '__origin__', + '__orig_bases__', '__extra__', '__tree_hash__', + '__doc__', '__subclasshook__', '__init__', '__new__', + '__module__', '_MutableMapping__marker', '_gorg')): + attrs.add(attr) + return attrs + + +def _is_callable_members_only(cls): + return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) + + +def _maybe_adjust_parameters(cls): + """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__. + + The contents of this function are very similar + to logic found in typing.Generic.__init_subclass__ + on the CPython main branch. + """ + tvars = [] + if '__orig_bases__' in cls.__dict__: + tvars = typing._collect_type_vars(cls.__orig_bases__) + # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. + # If found, tvars must be a subset of it. + # If not found, tvars is it. + # Also check for and reject plain Generic, + # and reject multiple Generic[...] and/or Protocol[...]. + gvars = None + for base in cls.__orig_bases__: + if (isinstance(base, typing._GenericAlias) and + base.__origin__ in (typing.Generic, Protocol)): + # for error messages + the_base = base.__origin__.__name__ + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...]" + " and/or Protocol[...] multiple types.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) + s_args = ', '.join(str(g) for g in gvars) + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in {the_base}[{s_args}]") + tvars = gvars + cls.__parameters__ = tuple(tvars) + + +# 3.8+ +if hasattr(typing, 'Protocol'): + Protocol = typing.Protocol +# 3.7 +else: + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + class _ProtocolMeta(abc.ABCMeta): # noqa: B024 + # This metaclass is a bit unfortunate and exists only because of the lack + # of __instancehook__. + def __instancecheck__(cls, instance): + # We need this method for situations where attributes are + # assigned in __init__. 
+ if ((not getattr(cls, '_is_protocol', False) or + _is_callable_members_only(cls)) and + issubclass(instance.__class__, cls)): + return True + if cls._is_protocol: + if all(hasattr(instance, attr) and + (not callable(getattr(cls, attr, None)) or + getattr(instance, attr) is not None) + for attr in _get_protocol_attrs(cls)): + return True + return super().__instancecheck__(instance) + + class Protocol(metaclass=_ProtocolMeta): + # There is quite a lot of overlapping code with typing.Generic. + # Unfortunately it is hard to avoid this while these live in two different + # modules. The duplicated code will be removed when Protocol is moved to typing. + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + """ + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if cls is Protocol: + raise TypeError("Type Protocol cannot be instantiated; " + "it can only be used as a base class") + return super().__new__(cls) + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple): + params = (params,) + if not params and cls is not typing.Tuple: + raise TypeError( + f"Parameter list to {cls.__qualname__}[...] cannot be empty") + msg = "Parameters to generic types must be types." + params = tuple(typing._type_check(p, msg) for p in params) # noqa + if cls is Protocol: + # Generic can only be subscripted with unique type variables. + if not all(isinstance(p, typing.TypeVar) for p in params): + i = 0 + while isinstance(params[i], typing.TypeVar): + i += 1 + raise TypeError( + "Parameters to Protocol[...] must all be type variables." + f" Parameter {i + 1} is {params[i]}") + if len(set(params)) != len(params): + raise TypeError( + "Parameters to Protocol[...] must all be unique") + else: + # Subscripting a regular Generic subclass. + _check_generic(cls, params, len(cls.__parameters__)) + return typing._GenericAlias(cls, params) + + def __init_subclass__(cls, *args, **kwargs): + if '__orig_bases__' in cls.__dict__: + error = typing.Generic in cls.__orig_bases__ + else: + error = typing.Generic in cls.__bases__ + if error: + raise TypeError("Cannot inherit from plain Generic") + _maybe_adjust_parameters(cls) + + # Determine if this is a protocol or a concrete subclass. + if not cls.__dict__.get('_is_protocol', None): + cls._is_protocol = any(b is Protocol for b in cls.__bases__) + + # Set (or override) the protocol subclass hook. 
+ def _proto_hook(other): + if not cls.__dict__.get('_is_protocol', None): + return NotImplemented + if not getattr(cls, '_is_runtime_protocol', False): + if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: + return NotImplemented + raise TypeError("Instance and class checks can only be used with" + " @runtime protocols") + if not _is_callable_members_only(cls): + if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: + return NotImplemented + raise TypeError("Protocols with non-method members" + " don't support issubclass()") + if not isinstance(other, type): + # Same error as for issubclass(1, int) + raise TypeError('issubclass() arg 1 must be a class') + for attr in _get_protocol_attrs(cls): + for base in other.__mro__: + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + annotations = getattr(base, '__annotations__', {}) + if (isinstance(annotations, typing.Mapping) and + attr in annotations and + isinstance(other, _ProtocolMeta) and + other._is_protocol): + break + else: + return NotImplemented + return True + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + # We have nothing more to do for non-protocols. + if not cls._is_protocol: + return + + # Check consistency of bases. + for base in cls.__bases__: + if not (base in (object, typing.Generic) or + base.__module__ == 'collections.abc' and + base.__name__ in _PROTO_WHITELIST or + isinstance(base, _ProtocolMeta) and base._is_protocol): + raise TypeError('Protocols can only inherit from other' + f' protocols, got {repr(base)}') + cls.__init__ = _no_init + + +# 3.8+ +if hasattr(typing, 'runtime_checkable'): + runtime_checkable = typing.runtime_checkable +# 3.7 +else: + def runtime_checkable(cls): + """Mark a protocol class as a runtime protocol, so that it + can be used with isinstance() and issubclass(). Raise TypeError + if applied to a non-protocol class. + + This allows a simple-minded structural check very similar to the + one-offs in collections.abc such as Hashable. + """ + if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol: + raise TypeError('@runtime_checkable can be only applied to protocol classes,' + f' got {cls!r}') + cls._is_runtime_protocol = True + return cls + + +# Exists for backwards compatibility. +runtime = runtime_checkable + + +# 3.8+ +if hasattr(typing, 'SupportsIndex'): + SupportsIndex = typing.SupportsIndex +# 3.7 +else: + @runtime_checkable + class SupportsIndex(Protocol): + __slots__ = () + + @abc.abstractmethod + def __index__(self) -> int: + pass + + +if hasattr(typing, "Required"): + # The standard library TypedDict in Python 3.8 does not store runtime information + # about which (if any) keys are optional. See https://bugs.python.org/issue38834 + # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" + # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 + # The standard library TypedDict below Python 3.11 does not store runtime + # information about optional and required keys when using Required or NotRequired. + # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11. + TypedDict = typing.TypedDict + _TypedDictMeta = typing._TypedDictMeta + is_typeddict = typing.is_typeddict +else: + def _check_fails(cls, other): + try: + if sys._getframe(1).f_globals['__name__'] not in ['abc', + 'functools', + 'typing']: + # Typed dicts are only for static structural subtyping. 
+ raise TypeError('TypedDict does not support instance and class checks') + except (AttributeError, ValueError): + pass + return False + + def _dict_new(*args, **kwargs): + if not args: + raise TypeError('TypedDict.__new__(): not enough arguments') + _, args = args[0], args[1:] # allow the "cls" keyword be passed + return dict(*args, **kwargs) + + _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)' + + def _typeddict_new(*args, total=True, **kwargs): + if not args: + raise TypeError('TypedDict.__new__(): not enough arguments') + _, args = args[0], args[1:] # allow the "cls" keyword be passed + if args: + typename, args = args[0], args[1:] # allow the "_typename" keyword be passed + elif '_typename' in kwargs: + typename = kwargs.pop('_typename') + import warnings + warnings.warn("Passing '_typename' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + raise TypeError("TypedDict.__new__() missing 1 required positional " + "argument: '_typename'") + if args: + try: + fields, = args # allow the "_fields" keyword be passed + except ValueError: + raise TypeError('TypedDict.__new__() takes from 2 to 3 ' + f'positional arguments but {len(args) + 2} ' + 'were given') + elif '_fields' in kwargs and len(kwargs) == 1: + fields = kwargs.pop('_fields') + import warnings + warnings.warn("Passing '_fields' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + fields = None + + if fields is None: + fields = kwargs + elif kwargs: + raise TypeError("TypedDict takes either a dict or keyword arguments," + " but not both") + + ns = {'__annotations__': dict(fields)} + try: + # Setting correct module is necessary to make typed dict classes pickleable. + ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return _TypedDictMeta(typename, (), ns, total=total) + + _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,' + ' /, *, total=True, **kwargs)') + + class _TypedDictMeta(type): + def __init__(cls, name, bases, ns, total=True): + super().__init__(name, bases, ns) + + def __new__(cls, name, bases, ns, total=True): + # Create new typed dict class object. + # This method is called directly when TypedDict is subclassed, + # or via _typeddict_new when TypedDict is instantiated. This way + # TypedDict supports all three syntaxes described in its docstring. + # Subclasses and instances of TypedDict return actual dictionaries + # via _dict_new. + ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new + # Don't insert typing.Generic into __bases__ here, + # or Generic.__init_subclass__ will raise TypeError + # in the super().__new__() call. + # Instead, monkey-patch __bases__ onto the class after it's been created. 
+ tp_dict = super().__new__(cls, name, (dict,), ns) + + if any(issubclass(base, typing.Generic) for base in bases): + tp_dict.__bases__ = (typing.Generic, dict) + _maybe_adjust_parameters(tp_dict) + + annotations = {} + own_annotations = ns.get('__annotations__', {}) + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + own_annotations = { + n: typing._type_check(tp, msg) for n, tp in own_annotations.items() + } + required_keys = set() + optional_keys = set() + + for base in bases: + annotations.update(base.__dict__.get('__annotations__', {})) + required_keys.update(base.__dict__.get('__required_keys__', ())) + optional_keys.update(base.__dict__.get('__optional_keys__', ())) + + annotations.update(own_annotations) + for annotation_key, annotation_type in own_annotations.items(): + annotation_origin = get_origin(annotation_type) + if annotation_origin is Annotated: + annotation_args = get_args(annotation_type) + if annotation_args: + annotation_type = annotation_args[0] + annotation_origin = get_origin(annotation_type) + + if annotation_origin is Required: + required_keys.add(annotation_key) + elif annotation_origin is NotRequired: + optional_keys.add(annotation_key) + elif total: + required_keys.add(annotation_key) + else: + optional_keys.add(annotation_key) + + tp_dict.__annotations__ = annotations + tp_dict.__required_keys__ = frozenset(required_keys) + tp_dict.__optional_keys__ = frozenset(optional_keys) + if not hasattr(tp_dict, '__total__'): + tp_dict.__total__ = total + return tp_dict + + __instancecheck__ = __subclasscheck__ = _check_fails + + TypedDict = _TypedDictMeta('TypedDict', (dict,), {}) + TypedDict.__module__ = __name__ + TypedDict.__doc__ = \ + """A simple typed name space. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, with each key + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by type checkers. + Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via the Point2D.__annotations__ dict, and + the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. + TypedDict supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + The class syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + """ + + if hasattr(typing, "_TypedDictMeta"): + _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta) + else: + _TYPEDDICT_TYPES = (_TypedDictMeta,) + + def is_typeddict(tp): + """Check if an annotation is a TypedDict class + + For example:: + class Film(TypedDict): + title: str + year: int + + is_typeddict(Film) # => True + is_typeddict(Union[list, str]) # => False + """ + return isinstance(tp, tuple(_TYPEDDICT_TYPES)) + + +if hasattr(typing, "assert_type"): + assert_type = typing.assert_type + +else: + def assert_type(__val, __typ): + """Assert (to the type checker) that the value is of the given type. 
+ + When the type checker encounters a call to assert_type(), it + emits an error if the value is not of the specified type:: + + def greet(name: str) -> None: + assert_type(name, str) # ok + assert_type(name, int) # type checker error + + At runtime this returns the first argument unchanged and otherwise + does nothing. + """ + return __val + + +if hasattr(typing, "Required"): + get_type_hints = typing.get_type_hints +else: + import functools + import types + + # replaces _strip_annotations() + def _strip_extras(t): + """Strips Annotated, Required and NotRequired from a given type.""" + if isinstance(t, _AnnotatedAlias): + return _strip_extras(t.__origin__) + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired): + return _strip_extras(t.__args__[0]) + if isinstance(t, typing._GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return t.copy_with(stripped_args) + if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return types.GenericAlias(t.__origin__, stripped_args) + if hasattr(types, "UnionType") and isinstance(t, types.UnionType): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return functools.reduce(operator.or_, stripped_args) + + return t + + def get_type_hints(obj, globalns=None, localns=None, include_extras=False): + """Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, adds Optional[t] if a + default value equal to None is set and recursively replaces all + 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' + (unless 'include_extras=True'). + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + """ + if hasattr(typing, "Annotated"): + hint = typing.get_type_hints( + obj, globalns=globalns, localns=localns, include_extras=True + ) + else: + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + if include_extras: + return hint + return {k: _strip_extras(t) for k, t in hint.items()} + + +# Python 3.9+ has PEP 593 (Annotated) +if hasattr(typing, 'Annotated'): + Annotated = typing.Annotated + # Not exported and not a public API, but needed for get_origin() and get_args() + # to work. + _AnnotatedAlias = typing._AnnotatedAlias +# 3.7-3.8 +else: + class _AnnotatedAlias(typing._GenericAlias, _root=True): + """Runtime representation of an annotated type. 
+ + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra annotations. The alias behaves like a normal typing alias, + instantiating is the same as instantiating the underlying type, binding + it to types is also the same. + """ + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin) + self.__metadata__ = metadata + + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) + + def __repr__(self): + return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " + f"{', '.join(repr(a) for a in self.__metadata__)}]") + + def __reduce__(self): + return operator.getitem, ( + Annotated, (self.__origin__,) + self.__metadata__ + ) + + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + if self.__origin__ != other.__origin__: + return False + return self.__metadata__ == other.__metadata__ + + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) + + class Annotated: + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type (and will be in + the __origin__ field), the remaining arguments are kept as a tuple in + the __extra__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + + __slots__ = () + + def __new__(cls, *args, **kwargs): + raise TypeError("Type Annotated cannot be instantiated.") + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation).") + allowed_special_forms = (ClassVar, Final) + if get_origin(params[0]) in allowed_special_forms: + origin = params[0] + else: + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError( + f"Cannot subclass {cls.__module__}.Annotated" + ) + +# Python 3.8 has get_origin() and get_args() but those implementations aren't +# Annotated-aware, so we can't use those. Python 3.9's versions don't support +# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. 
+if sys.version_info[:2] >= (3, 10): + get_origin = typing.get_origin + get_args = typing.get_args +# 3.7-3.9 +else: + try: + # 3.9+ + from typing import _BaseGenericAlias + except ImportError: + _BaseGenericAlias = typing._GenericAlias + try: + # 3.9+ + from typing import GenericAlias as _typing_GenericAlias + except ImportError: + _typing_GenericAlias = typing._GenericAlias + + def get_origin(tp): + """Get the unsubscripted version of a type. + + This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar + and Annotated. Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + get_origin(P.args) is P + """ + if isinstance(tp, _AnnotatedAlias): + return Annotated + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias, + ParamSpecArgs, ParamSpecKwargs)): + return tp.__origin__ + if tp is typing.Generic: + return typing.Generic + return None + + def get_args(tp): + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. + Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + """ + if isinstance(tp, _AnnotatedAlias): + return (tp.__origin__,) + tp.__metadata__ + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)): + if getattr(tp, "_special", False): + return () + res = tp.__args__ + if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: + res = (list(res[:-1]), res[-1]) + return res + return () + + +# 3.10+ +if hasattr(typing, 'TypeAlias'): + TypeAlias = typing.TypeAlias +# 3.9 +elif sys.version_info[:2] >= (3, 9): + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_TypeAliasForm + def TypeAlias(self, parameters): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + raise TypeError(f"{self} is not subscriptable") +# 3.7-3.8 +else: + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + TypeAlias = _TypeAliasForm('TypeAlias', + doc="""Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. 
+ + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example + above.""") + + +class _DefaultMixin: + """Mixin for TypeVarLike defaults.""" + + __slots__ = () + + def __init__(self, default): + if isinstance(default, (tuple, list)): + self.__default__ = tuple((typing._type_check(d, "Default must be a type") + for d in default)) + elif default: + self.__default__ = typing._type_check(default, "Default must be a type") + else: + self.__default__ = None + + +# Add default and infer_variance parameters from PEP 696 and 695 +class TypeVar(typing.TypeVar, _DefaultMixin, _root=True): + """Type variable.""" + + __module__ = 'typing' + + def __init__(self, name, *constraints, bound=None, + covariant=False, contravariant=False, + default=None, infer_variance=False): + super().__init__(name, *constraints, bound=bound, covariant=covariant, + contravariant=contravariant) + _DefaultMixin.__init__(self, default) + self.__infer_variance__ = infer_variance + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + +# Python 3.10+ has PEP 612 +if hasattr(typing, 'ParamSpecArgs'): + ParamSpecArgs = typing.ParamSpecArgs + ParamSpecKwargs = typing.ParamSpecKwargs +# 3.7-3.9 +else: + class _Immutable: + """Mixin to indicate that object should not be copied.""" + __slots__ = () + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + class ParamSpecArgs(_Immutable): + """The args for a ParamSpec object. + + Given a ParamSpec object P, P.args is an instance of ParamSpecArgs. + + ParamSpecArgs objects have a reference back to their ParamSpec: + + P.args.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. + """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return f"{self.__origin__.__name__}.args" + + def __eq__(self, other): + if not isinstance(other, ParamSpecArgs): + return NotImplemented + return self.__origin__ == other.__origin__ + + class ParamSpecKwargs(_Immutable): + """The kwargs for a ParamSpec object. + + Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs. + + ParamSpecKwargs objects have a reference back to their ParamSpec: + + P.kwargs.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. 
+ """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return f"{self.__origin__.__name__}.kwargs" + + def __eq__(self, other): + if not isinstance(other, ParamSpecKwargs): + return NotImplemented + return self.__origin__ == other.__origin__ + +# 3.10+ +if hasattr(typing, 'ParamSpec'): + + # Add default Parameter - PEP 696 + class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True): + """Parameter specification variable.""" + + __module__ = 'typing' + + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + default=None): + super().__init__(name, bound=bound, covariant=covariant, + contravariant=contravariant) + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + +# 3.7-3.9 +else: + + # Inherits from list as a workaround for Callable checks in Python < 3.9.2. + class ParamSpec(list, _DefaultMixin): + """Parameter specification variable. + + Usage:: + + P = ParamSpec('P') + + Parameter specification variables exist primarily for the benefit of static + type checkers. They are used to forward the parameter types of one + callable to another callable, a pattern commonly found in higher order + functions and decorators. They are only valid when used in ``Concatenate``, + or s the first argument to ``Callable``. In Python 3.10 and higher, + they are also supported in user-defined Generics at runtime. + See class Generic for more information on generic types. An + example for annotating a decorator:: + + T = TypeVar('T') + P = ParamSpec('P') + + def add_logging(f: Callable[P, T]) -> Callable[P, T]: + '''A type-safe decorator to add logging to a function.''' + def inner(*args: P.args, **kwargs: P.kwargs) -> T: + logging.info(f'{f.__name__} was called') + return f(*args, **kwargs) + return inner + + @add_logging + def add_two(x: float, y: float) -> float: + '''Add two numbers together.''' + return x + y + + Parameter specification variables defined with covariant=True or + contravariant=True can be used to declare covariant or contravariant + generic types. These keyword arguments are valid, but their actual semantics + are yet to be decided. See PEP 612 for details. + + Parameter specification variables can be introspected. e.g.: + + P.__name__ == 'T' + P.__bound__ == None + P.__covariant__ == False + P.__contravariant__ == False + + Note that only parameter specification variables defined in global scope can + be pickled. + """ + + # Trick Generic __parameters__. 
+ __class__ = typing.TypeVar + + @property + def args(self): + return ParamSpecArgs(self) + + @property + def kwargs(self): + return ParamSpecKwargs(self) + + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + default=None): + super().__init__([self]) + self.__name__ = name + self.__covariant__ = bool(covariant) + self.__contravariant__ = bool(contravariant) + if bound: + self.__bound__ = typing._type_check(bound, 'Bound must be a type.') + else: + self.__bound__ = None + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + def __repr__(self): + if self.__covariant__: + prefix = '+' + elif self.__contravariant__: + prefix = '-' + else: + prefix = '~' + return prefix + self.__name__ + + def __hash__(self): + return object.__hash__(self) + + def __eq__(self, other): + return self is other + + def __reduce__(self): + return self.__name__ + + # Hack to get typing._type_check to pass. + def __call__(self, *args, **kwargs): + pass + + +# 3.7-3.9 +if not hasattr(typing, 'Concatenate'): + # Inherits from list as a workaround for Callable checks in Python < 3.9.2. + class _ConcatenateGenericAlias(list): + + # Trick Generic into looking into this for __parameters__. + __class__ = typing._GenericAlias + + # Flag in 3.8. + _special = False + + def __init__(self, origin, args): + super().__init__(args) + self.__origin__ = origin + self.__args__ = args + + def __repr__(self): + _type_repr = typing._type_repr + return (f'{_type_repr(self.__origin__)}' + f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]') + + def __hash__(self): + return hash((self.__origin__, self.__args__)) + + # Hack to get typing._type_check to pass in Generic. + def __call__(self, *args, **kwargs): + pass + + @property + def __parameters__(self): + return tuple( + tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) + ) + + +# 3.7-3.9 +@typing._tp_cache +def _concatenate_getitem(self, parameters): + if parameters == (): + raise TypeError("Cannot take a Concatenate of no types.") + if not isinstance(parameters, tuple): + parameters = (parameters,) + if not isinstance(parameters[-1], ParamSpec): + raise TypeError("The last parameter to Concatenate should be a " + "ParamSpec variable.") + msg = "Concatenate[arg, ...]: each arg must be a type." + parameters = tuple(typing._type_check(p, msg) for p in parameters) + return _ConcatenateGenericAlias(self, parameters) + + +# 3.10+ +if hasattr(typing, 'Concatenate'): + Concatenate = typing.Concatenate + _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa +# 3.9 +elif sys.version_info[:2] >= (3, 9): + @_TypeAliasForm + def Concatenate(self, parameters): + """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """ + return _concatenate_getitem(self, parameters) +# 3.7-8 +else: + class _ConcatenateForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' 
+ self._name + + def __getitem__(self, parameters): + return _concatenate_getitem(self, parameters) + + Concatenate = _ConcatenateForm( + 'Concatenate', + doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """) + +# 3.10+ +if hasattr(typing, 'TypeGuard'): + TypeGuard = typing.TypeGuard +# 3.9 +elif sys.version_info[:2] >= (3, 9): + class _TypeGuardForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_TypeGuardForm + def TypeGuard(self, parameters): + """Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """ + item = typing._type_check(parameters, f'{self} accepts only a single type.') + return typing._GenericAlias(self, (item,)) +# 3.7-3.8 +else: + class _TypeGuardForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type') + return typing._GenericAlias(self, (item,)) + + TypeGuard = _TypeGuardForm( + 'TypeGuard', + doc="""Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. 
The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """) + + +# Vendored from cpython typing._SpecialFrom +class _SpecialForm(typing._Final, _root=True): + __slots__ = ('_name', '__doc__', '_getitem') + + def __init__(self, getitem): + self._getitem = getitem + self._name = getitem.__name__ + self.__doc__ = getitem.__doc__ + + def __getattr__(self, item): + if item in {'__name__', '__qualname__'}: + return self._name + + raise AttributeError(item) + + def __mro_entries__(self, bases): + raise TypeError(f"Cannot subclass {self!r}") + + def __repr__(self): + return f'typing_extensions.{self._name}' + + def __reduce__(self): + return self._name + + def __call__(self, *args, **kwds): + raise TypeError(f"Cannot instantiate {self!r}") + + def __or__(self, other): + return typing.Union[self, other] + + def __ror__(self, other): + return typing.Union[other, self] + + def __instancecheck__(self, obj): + raise TypeError(f"{self} cannot be used with isinstance()") + + def __subclasscheck__(self, cls): + raise TypeError(f"{self} cannot be used with issubclass()") + + @typing._tp_cache + def __getitem__(self, parameters): + return self._getitem(self, parameters) + + +if hasattr(typing, "LiteralString"): + LiteralString = typing.LiteralString +else: + @_SpecialForm + def LiteralString(self, params): + """Represents an arbitrary literal string. + + Example:: + + from typing_extensions import LiteralString + + def query(sql: LiteralString) -> ...: + ... + + query("SELECT * FROM table") # ok + query(f"SELECT * FROM {input()}") # not ok + + See PEP 675 for details. + + """ + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, "Self"): + Self = typing.Self +else: + @_SpecialForm + def Self(self, params): + """Used to spell the type of "self" in classes. + + Example:: + + from typing import Self + + class ReturnsSelf: + def parse(self, data: bytes) -> Self: + ... + return self + + """ + + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, "Never"): + Never = typing.Never +else: + @_SpecialForm + def Never(self, params): + """The bottom type, a type that has no members. 
+ + This can be used to define a function that should never be + called, or a function that never returns:: + + from typing_extensions import Never + + def never_call_me(arg: Never) -> None: + pass + + def int_or_str(arg: int | str) -> None: + never_call_me(arg) # type checker error + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + never_call_me(arg) # ok, arg is of type Never + + """ + + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, 'Required'): + Required = typing.Required + NotRequired = typing.NotRequired +elif sys.version_info[:2] >= (3, 9): + class _ExtensionsSpecialForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_ExtensionsSpecialForm + def Required(self, parameters): + """A special typing construct to mark a key of a total=False TypedDict + as required. For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + @_ExtensionsSpecialForm + def NotRequired(self, parameters): + """A special typing construct to mark a key of a TypedDict as + potentially missing. For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + +else: + class _RequiredForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + Required = _RequiredForm( + 'Required', + doc="""A special typing construct to mark a key of a total=False TypedDict + as required. For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """) + NotRequired = _RequiredForm( + 'NotRequired', + doc="""A special typing construct to mark a key of a TypedDict as + potentially missing. For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """) + + +if hasattr(typing, "Unpack"): # 3.11+ + Unpack = typing.Unpack +elif sys.version_info[:2] >= (3, 9): + class _UnpackSpecialForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + + @_UnpackSpecialForm + def Unpack(self, parameters): + """A special typing construct to unpack a variadic type. For example: + + Shape = TypeVarTuple('Shape') + Batch = NewType('Batch', int) + + def add_batch_axis( + x: Array[Unpack[Shape]] + ) -> Array[Batch, Unpack[Shape]]: ... 
+ + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + +else: + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + + class _UnpackForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + Unpack = _UnpackForm( + 'Unpack', + doc="""A special typing construct to unpack a variadic type. For example: + + Shape = TypeVarTuple('Shape') + Batch = NewType('Batch', int) + + def add_batch_axis( + x: Array[Unpack[Shape]] + ) -> Array[Batch, Unpack[Shape]]: ... + + """) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + + +if hasattr(typing, "TypeVarTuple"): # 3.11+ + + # Add default Parameter - PEP 696 + class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True): + """Type variable tuple.""" + + def __init__(self, name, *, default=None): + super().__init__(name) + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + +else: + class TypeVarTuple(_DefaultMixin): + """Type variable tuple. + + Usage:: + + Ts = TypeVarTuple('Ts') + + In the same way that a normal type variable is a stand-in for a single + type such as ``int``, a type variable *tuple* is a stand-in for a *tuple* + type such as ``Tuple[int, str]``. + + Type variable tuples can be used in ``Generic`` declarations. + Consider the following example:: + + class Array(Generic[*Ts]): ... + + The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``, + where ``T1`` and ``T2`` are type variables. To use these type variables + as type parameters of ``Array``, we must *unpack* the type variable tuple using + the star operator: ``*Ts``. The signature of ``Array`` then behaves + as if we had simply written ``class Array(Generic[T1, T2]): ...``. + In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows + us to parameterise the class with an *arbitrary* number of type parameters. + + Type variable tuples can be used anywhere a normal ``TypeVar`` can. + This includes class definitions, as shown above, as well as function + signatures and variable annotations:: + + class Array(Generic[*Ts]): + + def __init__(self, shape: Tuple[*Ts]): + self._shape: Tuple[*Ts] = shape + + def get_shape(self) -> Tuple[*Ts]: + return self._shape + + shape = (Height(480), Width(640)) + x: Array[Height, Width] = Array(shape) + y = abs(x) # Inferred type is Array[Height, Width] + z = x + x # ... is Array[Height, Width] + x.get_shape() # ... is tuple[Height, Width] + + """ + + # Trick Generic __parameters__. 
+ __class__ = typing.TypeVar + + def __iter__(self): + yield self.__unpacked__ + + def __init__(self, name, *, default=None): + self.__name__ = name + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + self.__unpacked__ = Unpack[self] + + def __repr__(self): + return self.__name__ + + def __hash__(self): + return object.__hash__(self) + + def __eq__(self, other): + return self is other + + def __reduce__(self): + return self.__name__ + + def __init_subclass__(self, *args, **kwds): + if '_root' not in kwds: + raise TypeError("Cannot subclass special typing classes") + + +if hasattr(typing, "reveal_type"): + reveal_type = typing.reveal_type +else: + def reveal_type(__obj: T) -> T: + """Reveal the inferred type of a variable. + + When a static type checker encounters a call to ``reveal_type()``, + it will emit the inferred type of the argument:: + + x: int = 1 + reveal_type(x) + + Running a static type checker (e.g., ``mypy``) on this example + will produce output similar to 'Revealed type is "builtins.int"'. + + At runtime, the function prints the runtime type of the + argument and returns it unchanged. + + """ + print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr) + return __obj + + +if hasattr(typing, "assert_never"): + assert_never = typing.assert_never +else: + def assert_never(__arg: Never) -> Never: + """Assert to the type checker that a line of code is unreachable. + + Example:: + + def int_or_str(arg: int | str) -> None: + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + assert_never(arg) + + If a type checker finds that a call to assert_never() is + reachable, it will emit an error. + + At runtime, this throws an exception when called. + + """ + raise AssertionError("Expected code to be unreachable") + + +if hasattr(typing, 'dataclass_transform'): + dataclass_transform = typing.dataclass_transform +else: + def dataclass_transform( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + field_specifiers: typing.Tuple[ + typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], + ... + ] = (), + **kwargs: typing.Any, + ) -> typing.Callable[[T], T]: + """Decorator that marks a function, class, or metaclass as providing + dataclass-like behavior. + + Example: + + from typing_extensions import dataclass_transform + + _T = TypeVar("_T") + + # Used on a decorator function + @dataclass_transform() + def create_model(cls: type[_T]) -> type[_T]: + ... + return cls + + @create_model + class CustomerModel: + id: int + name: str + + # Used on a base class + @dataclass_transform() + class ModelBase: ... + + class CustomerModel(ModelBase): + id: int + name: str + + # Used on a metaclass + @dataclass_transform() + class ModelMeta(type): ... + + class ModelBase(metaclass=ModelMeta): ... + + class CustomerModel(ModelBase): + id: int + name: str + + Each of the ``CustomerModel`` classes defined in this example will now + behave similarly to a dataclass created with the ``@dataclasses.dataclass`` + decorator. For example, the type checker will synthesize an ``__init__`` + method. + + The arguments to this decorator can be used to customize this behavior: + - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be + True or False if it is omitted by the caller. 
+ - ``order_default`` indicates whether the ``order`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``kw_only_default`` indicates whether the ``kw_only`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``field_specifiers`` specifies a static list of supported classes + or functions that describe fields, similar to ``dataclasses.field()``. + + At runtime, this decorator records its arguments in the + ``__dataclass_transform__`` attribute on the decorated object. + + See PEP 681 for details. + + """ + def decorator(cls_or_fn): + cls_or_fn.__dataclass_transform__ = { + "eq_default": eq_default, + "order_default": order_default, + "kw_only_default": kw_only_default, + "field_specifiers": field_specifiers, + "kwargs": kwargs, + } + return cls_or_fn + return decorator + + +if hasattr(typing, "override"): + override = typing.override +else: + _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any]) + + def override(__arg: _F) -> _F: + """Indicate that a method is intended to override a method in a base class. + + Usage: + + class Base: + def method(self) -> None: ... + pass + + class Child(Base): + @override + def method(self) -> None: + super().method() + + When this decorator is applied to a method, the type checker will + validate that it overrides a method with the same name on a base class. + This helps prevent bugs that may occur when a base class is changed + without an equivalent change to a child class. + + See PEP 698 for details. + + """ + return __arg + + +# We have to do some monkey patching to deal with the dual nature of +# Unpack/TypeVarTuple: +# - We want Unpack to be a kind of TypeVar so it gets accepted in +# Generic[Unpack[Ts]] +# - We want it to *not* be treated as a TypeVar for the purposes of +# counting generic parameters, so that when we subscript a generic, +# the runtime doesn't try to substitute the Unpack with the subscripted type. +if not hasattr(typing, "TypeVarTuple"): + typing._collect_type_vars = _collect_type_vars + typing._check_generic = _check_generic + + +# Backport typing.NamedTuple as it exists in Python 3.11. +# In 3.11, the ability to define generic `NamedTuple`s was supported. +# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8. 
+if sys.version_info >= (3, 11): + NamedTuple = typing.NamedTuple +else: + def _caller(): + try: + return sys._getframe(2).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): # For platforms without _getframe() + return None + + def _make_nmtuple(name, types, module, defaults=()): + fields = [n for n, t in types] + annotations = {n: typing._type_check(t, f"field {n} annotation must be a type") + for n, t in types} + nm_tpl = collections.namedtuple(name, fields, + defaults=defaults, module=module) + nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations + # The `_field_types` attribute was removed in 3.9; + # in earlier versions, it is the same as the `__annotations__` attribute + if sys.version_info < (3, 9): + nm_tpl._field_types = annotations + return nm_tpl + + _prohibited_namedtuple_fields = typing._prohibited + _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'}) + + class _NamedTupleMeta(type): + def __new__(cls, typename, bases, ns): + assert _NamedTuple in bases + for base in bases: + if base is not _NamedTuple and base is not typing.Generic: + raise TypeError( + 'can only inherit from a NamedTuple type and Generic') + bases = tuple(tuple if base is _NamedTuple else base for base in bases) + types = ns.get('__annotations__', {}) + default_names = [] + for field_name in types: + if field_name in ns: + default_names.append(field_name) + elif default_names: + raise TypeError(f"Non-default namedtuple field {field_name} " + f"cannot follow default field" + f"{'s' if len(default_names) > 1 else ''} " + f"{', '.join(default_names)}") + nm_tpl = _make_nmtuple( + typename, types.items(), + defaults=[ns[n] for n in default_names], + module=ns['__module__'] + ) + nm_tpl.__bases__ = bases + if typing.Generic in bases: + class_getitem = typing.Generic.__class_getitem__.__func__ + nm_tpl.__class_getitem__ = classmethod(class_getitem) + # update from user namespace without overriding special namedtuple attributes + for key in ns: + if key in _prohibited_namedtuple_fields: + raise AttributeError("Cannot overwrite NamedTuple attribute " + key) + elif key not in _special_namedtuple_fields and key not in nm_tpl._fields: + setattr(nm_tpl, key, ns[key]) + if typing.Generic in bases: + nm_tpl.__init_subclass__() + return nm_tpl + + def NamedTuple(__typename, __fields=None, **kwargs): + if __fields is None: + __fields = kwargs.items() + elif kwargs: + raise TypeError("Either list of fields or keywords" + " can be provided to NamedTuple, not both") + return _make_nmtuple(__typename, __fields, module=_caller()) + + NamedTuple.__doc__ = typing.NamedTuple.__doc__ + _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {}) + + # On 3.8+, alter the signature so that it matches typing.NamedTuple. + # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7, + # so just leave the signature as it is on 3.7. 
+ if sys.version_info >= (3, 8): + NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)' + + def _namedtuple_mro_entries(bases): + assert NamedTuple in bases + return (_NamedTuple,) + + NamedTuple.__mro_entries__ = _namedtuple_mro_entries diff --git a/lib/pkg_resources/_vendor/vendored.txt b/lib/pkg_resources/_vendor/vendored.txt index 8e015069..da7d08d2 100644 --- a/lib/pkg_resources/_vendor/vendored.txt +++ b/lib/pkg_resources/_vendor/vendored.txt @@ -1,6 +1,9 @@ -packaging==21.3 -pyparsing==3.0.9 -appdirs==1.4.3 +packaging==23.0 + +platformdirs==2.6.2 +# required for platformdirs on Python < 3.8 +typing_extensions==4.4.0 + jaraco.text==3.7.0 # required for jaraco.text on older Pythons importlib_resources==5.4.0 diff --git a/lib/pkg_resources/api_tests.txt b/lib/pkg_resources/api_tests.txt index ded18800..d72b85aa 100644 --- a/lib/pkg_resources/api_tests.txt +++ b/lib/pkg_resources/api_tests.txt @@ -338,49 +338,72 @@ Environment Markers >>> import os >>> print(im("sys_platform")) - Invalid marker: 'sys_platform', parse error at '' + Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in + sys_platform + ^ >>> print(im("sys_platform==")) - Invalid marker: 'sys_platform==', parse error at '' + Expected a marker variable or quoted string + sys_platform== + ^ >>> print(im("sys_platform=='win32'")) False >>> print(im("sys=='x'")) - Invalid marker: "sys=='x'", parse error at "sys=='x'" + Expected a marker variable or quoted string + sys=='x' + ^ >>> print(im("(extra)")) - Invalid marker: '(extra)', parse error at ')' + Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in + (extra) + ^ >>> print(im("(extra")) - Invalid marker: '(extra', parse error at '' + Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in + (extra + ^ >>> print(im("os.open('foo')=='y'")) - Invalid marker: "os.open('foo')=='y'", parse error at 'os.open(' + Expected a marker variable or quoted string + os.open('foo')=='y' + ^ >>> print(im("'x'=='y' and os.open('foo')=='y'")) # no short-circuit! - Invalid marker: "'x'=='y' and os.open('foo')=='y'", parse error at 'and os.o' + Expected a marker variable or quoted string + 'x'=='y' and os.open('foo')=='y' + ^ >>> print(im("'x'=='x' or os.open('foo')=='y'")) # no short-circuit! 
- Invalid marker: "'x'=='x' or os.open('foo')=='y'", parse error at 'or os.op' - - >>> print(im("'x' < 'y' < 'z'")) - Invalid marker: "'x' < 'y' < 'z'", parse error at "< 'z'" + Expected a marker variable or quoted string + 'x'=='x' or os.open('foo')=='y' + ^ >>> print(im("r'x'=='x'")) - Invalid marker: "r'x'=='x'", parse error at "r'x'=='x" + Expected a marker variable or quoted string + r'x'=='x' + ^ >>> print(im("'''x'''=='x'")) - Invalid marker: "'''x'''=='x'", parse error at "'x'''=='" + Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in + '''x'''=='x' + ^ >>> print(im('"""x"""=="x"')) - Invalid marker: '"""x"""=="x"', parse error at '"x"""=="' + Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in + """x"""=="x" + ^ >>> print(im(r"x\n=='x'")) - Invalid marker: "x\\n=='x'", parse error at "x\\n=='x'" + Expected a marker variable or quoted string + x\n=='x' + ^ >>> print(im("os.open=='y'")) - Invalid marker: "os.open=='y'", parse error at 'os.open=' + Expected a marker variable or quoted string + os.open=='y' + ^ >>> em("sys_platform=='win32'") == (sys.platform=='win32') True diff --git a/lib/pkg_resources/extern/__init__.py b/lib/pkg_resources/extern/__init__.py index 70897eea..948bcc60 100644 --- a/lib/pkg_resources/extern/__init__.py +++ b/lib/pkg_resources/extern/__init__.py @@ -58,7 +58,8 @@ class VendorImporter: """Return a module spec for vendored names.""" return ( importlib.util.spec_from_loader(fullname, self) - if self._module_matches_namespace(fullname) else None + if self._module_matches_namespace(fullname) + else None ) def install(self): @@ -70,7 +71,10 @@ class VendorImporter: names = ( - 'packaging', 'pyparsing', 'appdirs', 'jaraco', 'importlib_resources', + 'packaging', + 'platformdirs', + 'jaraco', + 'importlib_resources', 'more_itertools', ) VendorImporter(__name__, names).install() From 8ddffb7882cb625bc0893994b6acce111bffae83 Mon Sep 17 00:00:00 2001 From: Prinz23 Date: Sat, 11 Feb 2023 18:02:58 +0000 Subject: [PATCH 04/21] Change py2 deprecation cleanups. Remove py2 part from _23.py Remove more mapped stuff. Replace filter_iter with native filter. Replace map_iter with native map. Remove unidecode from _23 (empty wrapper on py3). Remove map_list and replace with native list(map( for performance reasons. Replace filter_list with list(filter. Replace list_keys with list(. Replace list_values with list(...values()). Replace list_items with list(....items()). Replace ordered_dict with dict. Fix tvinfo base type docs. Remove py2 parts from sg_futures. Remove scandir lib ... it's a sub module of os in py3. Remove PY2 stuff. Ignore unknown ids for characters/persons. Fix tvdb image parsing. Ignore unknown id sources on person page. 
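For reference, the helper-to-builtin replacements listed above follow a mechanical pattern. The sketch below is illustrative only and is not part of the patch; it assumes a Python 3.7+ interpreter (where plain dicts keep insertion order) and shows the builtin form that each removed lib/_23.py helper reduces to.

    # Illustrative sketch, not from the repo: builtin forms that replace
    # the legacy helpers removed from lib/_23.py in this patch.
    d = {'b': 2, 'a': 1}

    assert list(d) == ['b', 'a']                     # was list_keys(d)
    assert list(d.values()) == [2, 1]                # was list_values(d)
    assert list(d.items()) == [('b', 2), ('a', 1)]   # was list_items(d)
    assert list(map(str, (1, 2))) == ['1', '2']      # was map_list(str, (1, 2))
    assert list(filter(None, (0, 1))) == [1]         # was filter_list(None, (0, 1))
    assert next(filter(None, (0, 1))) == 1           # plain filter() replaces filter_iter
    assert dict(a=1) == {'a': 1}                     # dict replaces ordered_dict
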
--- gui/slick/interfaces/default/cast_person.tmpl | 4 + .../interfaces/default/config_general.tmpl | 3 +- .../default/inc_qualityChooser.tmpl | 9 +- .../interfaces/default/manage_massEdit.tmpl | 5 +- gui/slick/interfaces/default/viewlogs.tmpl | 3 +- lib/_23.py | 208 +----- lib/api_tmdb/tmdb_api.py | 9 +- lib/api_tvdb/tvdb_api.py | 23 +- lib/api_tvmaze/tvmaze_api.py | 3 +- lib/enzyme/fourcc.py | 4 +- lib/rtorrent/__init__.py | 19 +- lib/rtorrent/file.py | 4 +- lib/rtorrent/group.py | 4 +- lib/rtorrent/rpc/__init__.py | 12 +- lib/rtorrent/torrent.py | 12 +- lib/rtorrent/tracker.py | 4 +- lib/scandir/LICENSE.txt | 27 - lib/scandir/__init__.py | 0 lib/scandir/scandir.py | 697 ------------------ lib/sg_futures/__init__.py | 6 +- lib/sg_futures/base.py | 9 +- lib/sg_futures/futures/__init__.py | 23 - lib/sg_futures/futures/_base.py | 673 ----------------- lib/sg_futures/futures/process.py | 363 --------- lib/sg_futures/futures/thread.py | 207 ------ lib/sg_futures/py2.py | 55 -- lib/sg_helpers.py | 12 +- lib/tvinfo_base/base.py | 25 +- sickgear/__init__.py | 45 +- sickgear/_legacy.py | 4 +- sickgear/classes.py | 91 +-- sickgear/clients/download_station.py | 36 +- sickgear/clients/qbittorrent.py | 27 +- sickgear/common.py | 5 +- sickgear/config.py | 4 +- sickgear/db.py | 18 +- sickgear/failed_history.py | 5 - sickgear/helpers.py | 7 +- sickgear/history.py | 5 - sickgear/indexermapper.py | 7 +- sickgear/indexers/indexer_api.py | 12 +- sickgear/metadata/__init__.py | 3 +- sickgear/metadata/generic.py | 9 +- sickgear/metadata/kodi.py | 4 +- sickgear/name_parser/parser.py | 15 +- sickgear/network_timezones.py | 7 +- sickgear/notifiers/__init__.py | 17 +- sickgear/notifiers/emby.py | 4 +- sickgear/notifiers/plex.py | 23 +- sickgear/notifiers/trakt.py | 5 +- sickgear/notifiers/xbmc.py | 4 +- sickgear/piper.py | 20 +- sickgear/postProcessor.py | 14 +- sickgear/processTV.py | 36 +- sickgear/properFinder.py | 20 +- sickgear/providers/__init__.py | 25 +- sickgear/providers/alpharatio.py | 2 - sickgear/providers/bithdtv.py | 2 - sickgear/providers/blutopia.py | 4 +- sickgear/providers/btn.py | 2 - sickgear/providers/eztv.py | 3 +- sickgear/providers/fano.py | 2 - sickgear/providers/filelist.py | 2 - sickgear/providers/funfile.py | 2 - sickgear/providers/generic.py | 42 +- sickgear/providers/hdspace.py | 2 - sickgear/providers/hdtorrents.py | 2 - sickgear/providers/immortalseed.py | 2 - sickgear/providers/iptorrents.py | 3 +- sickgear/providers/limetorrents.py | 4 +- sickgear/providers/magnetdl.py | 2 - sickgear/providers/milkie.py | 3 - sickgear/providers/morethan.py | 3 +- sickgear/providers/ncore.py | 2 - sickgear/providers/nebulance.py | 7 +- sickgear/providers/nyaa.py | 2 - sickgear/providers/pretome.py | 2 - sickgear/providers/privatehd.py | 4 +- sickgear/providers/ptf.py | 2 - sickgear/providers/revtt.py | 2 - sickgear/providers/scenehd.py | 2 - sickgear/providers/scenetime.py | 2 - sickgear/providers/shazbat.py | 3 +- sickgear/providers/showrss.py | 15 +- sickgear/providers/snowfl.py | 18 +- sickgear/providers/speedapp.py | 14 +- sickgear/providers/speedcd.py | 10 +- sickgear/providers/thepiratebay.py | 3 +- sickgear/providers/tokyotoshokan.py | 8 +- sickgear/providers/torlock.py | 4 +- sickgear/providers/torrenting.py | 2 - sickgear/providers/torrentleech.py | 8 +- sickgear/providers/tvchaosuk.py | 4 +- sickgear/providers/xspeeds.py | 2 - sickgear/scene_exceptions.py | 18 +- sickgear/scene_numbering.py | 10 +- sickgear/search.py | 11 +- sickgear/search_backlog.py | 11 +- sickgear/search_queue.py | 6 
+- sickgear/sgdatetime.py | 29 +- sickgear/show_name_helpers.py | 4 +- sickgear/trakt_helpers.py | 4 +- sickgear/tv.py | 36 +- sickgear/tvcache.py | 10 +- sickgear/version_checker.py | 3 +- sickgear/webapi.py | 8 +- sickgear/webserve.py | 124 ++-- sickgear/webserveInit.py | 16 +- 108 files changed, 460 insertions(+), 2923 deletions(-) delete mode 100644 lib/scandir/LICENSE.txt delete mode 100644 lib/scandir/__init__.py delete mode 100644 lib/scandir/scandir.py delete mode 100644 lib/sg_futures/futures/__init__.py delete mode 100644 lib/sg_futures/futures/_base.py delete mode 100644 lib/sg_futures/futures/process.py delete mode 100644 lib/sg_futures/futures/thread.py delete mode 100644 lib/sg_futures/py2.py diff --git a/gui/slick/interfaces/default/cast_person.tmpl b/gui/slick/interfaces/default/cast_person.tmpl index 3ce066a3..3d9b9568 100644 --- a/gui/slick/interfaces/default/cast_person.tmpl +++ b/gui/slick/interfaces/default/cast_person.tmpl @@ -182,7 +182,11 @@ def param(visible=True, rid=None, cache_person=None, cache_char=None, person=Non #end if #set $section_links = False +#set $all_sources = $TVInfoAPI().all_sources #for $cur_src, $cur_sid in sorted(iteritems($person.ids)) + #if $cur_src not in $all_sources: + #continue + #end if #if $TVInfoAPI($cur_src).config.get('people_url') #if not $section_links #set $section_links = True diff --git a/gui/slick/interfaces/default/config_general.tmpl b/gui/slick/interfaces/default/config_general.tmpl index 9f604721..e776220f 100644 --- a/gui/slick/interfaces/default/config_general.tmpl +++ b/gui/slick/interfaces/default/config_general.tmpl @@ -13,7 +13,6 @@ #from sickgear.sgdatetime import * <% def sg_var(varname, default=False): return getattr(sickgear, varname, default) %>#slurp# <% def sg_str(varname, default=''): return getattr(sickgear, varname, default) %>#slurp# -#from _23 import list_keys ## #set global $title = 'Config - General' #set global $header = 'General Settings' @@ -846,7 +845,7 @@ File logging level: #for $cur_quality in sorted($any_quality_list): @@ -96,7 +95,7 @@

Upgrade to

-#set $best_quality_list = filter_list(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings) +#set $best_quality_list = list(filter(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings)) #for $curQuality in sorted($anyQualityList): @@ -78,7 +77,7 @@

Upgrade to

-#set $bestQualityList = filter_list(lambda x: x > $Quality.SDTV, $Quality.qualityStrings) +#set $bestQualityList = list(filter(lambda x: x > $Quality.SDTV, $Quality.qualityStrings)) -#set $levels = $list_keys($reverseNames) +#set $levels = $list($reverseNames) #set void = $levels.sort(key=lambda x: $reverseNames[$x]) #set $level_count = len($levels) #for $level in $levels diff --git a/lib/_23.py b/lib/_23.py index ea8835d6..d8f2b0c5 100644 --- a/lib/_23.py +++ b/lib/_23.py @@ -19,8 +19,8 @@ import datetime from collections import deque from itertools import islice from sys import version_info +from base64 import encodebytes as b64encodebytes -from six import binary_type, moves # noinspection PyUnresolvedReferences from six.moves.urllib.parse import quote, quote_plus, unquote as six_unquote, unquote_plus as six_unquote_plus, \ urlencode, urlsplit, urlunparse, urlunsplit @@ -42,15 +42,11 @@ if False: PY38 = version_info[0:2] >= (3, 8) -""" one off consumables (Iterators) """ -filter_iter = moves.filter # type: Callable[[Callable, Iterable], Iterator] -map_iter = moves.map # type: Callable[[Callable, ...], Iterator] - def map_consume(*args): # type: (...) -> None """Run a lambda over elements without returning anything""" - deque(moves.map(*args), maxlen=0) + deque(map(*args), maxlen=0) def consume(iterator, n=None): @@ -76,7 +72,7 @@ def consume(iterator, n=None): def decode_str(s, encoding='utf-8', errors=None): # type: (...) -> AnyStr - if isinstance(s, binary_type): + if isinstance(s, bytes): if None is errors: return s.decode(encoding) return s.decode(encoding, errors) @@ -99,7 +95,7 @@ def html_unescape(s): def list_range(*args, **kwargs): # type: (...) -> List - return list(moves.range(*args, **kwargs)) + return list(range(*args, **kwargs)) def urlparse(url, scheme='', allow_fragments=True): @@ -135,181 +131,45 @@ def b64encodestring(s, keep_eol=False): return data.rstrip() -if 2 != version_info[0]: - # --------- - # Python 3+ - # --------- - # noinspection PyUnresolvedReferences,PyProtectedMember - from base64 import decodebytes, encodebytes - b64decodebytes = decodebytes - b64encodebytes = encodebytes - # noinspection PyUnresolvedReferences,PyCompatibility - from configparser import ConfigParser - # noinspection PyUnresolvedReferences - from enum import Enum - # noinspection PyUnresolvedReferences - from os import scandir, DirEntry - # noinspection PyUnresolvedReferences - from itertools import zip_longest - # noinspection PyUnresolvedReferences - from inspect import getfullargspec as getargspec +# noinspection PyUnresolvedReferences,PyProtectedMember +# noinspection PyUnresolvedReferences,PyCompatibility +from configparser import ConfigParser +# noinspection PyUnresolvedReferences +from enum import Enum +# noinspection PyUnresolvedReferences +from os import scandir, DirEntry +# noinspection PyUnresolvedReferences +from itertools import zip_longest +# noinspection PyUnresolvedReferences +from inspect import getfullargspec as getargspec - # noinspection PyUnresolvedReferences - from subprocess import Popen +# noinspection PyUnresolvedReferences +from subprocess import Popen - # noinspection PyUnresolvedReferences, PyPep8Naming - import xml.etree.ElementTree as etree +# noinspection PyUnresolvedReferences, PyPep8Naming +import xml.etree.ElementTree as etree - ordered_dict = dict +native_timestamp = datetime.datetime.timestamp # type: Callable[[datetime.datetime], float] - native_timestamp = datetime.datetime.timestamp # type: Callable[[datetime.datetime], float] - def unquote(string, 
encoding='utf-8', errors='replace'): - return decode_str(six_unquote(decode_str(string, encoding, errors), encoding=encoding, errors=errors), - encoding, errors) +def unquote(string, encoding='utf-8', errors='replace'): + return decode_str(six_unquote(decode_str(string, encoding, errors), encoding=encoding, errors=errors), + encoding, errors) - def unquote_plus(string, encoding='utf-8', errors='replace'): - return decode_str(six_unquote_plus(decode_str(string, encoding, errors), encoding=encoding, errors=errors), - encoding, errors) - def decode_bytes(d, encoding='utf-8', errors='replace'): - if not isinstance(d, binary_type): - # noinspection PyArgumentList - return bytes(d, encoding=encoding, errors=errors) - return d +def unquote_plus(string, encoding='utf-8', errors='replace'): + return decode_str(six_unquote_plus(decode_str(string, encoding, errors), encoding=encoding, errors=errors), + encoding, errors) - def filter_list(*args): - # type: (...) -> List - return list(filter(*args)) - def list_items(d): - # type: (Dict) -> List[Tuple[Any, Any]] - """ - equivalent to python 2 .items() - """ - return list(d.items()) +def decode_bytes(d, encoding='utf-8', errors='replace'): + if not isinstance(d, bytes): + # noinspection PyArgumentList + return bytes(d, encoding=encoding, errors=errors) + return d - def list_keys(d): - # type: (Dict) -> List - """ - equivalent to python 2 .keys() - """ - return list(d) - def list_values(d): - # type: (Dict) -> List - """ - equivalent to python 2 .values() - """ - return list(d.values()) +def map_none(*args): + # type: (...) -> List + return list(zip_longest(*args)) - def map_list(*args): - # type: (...) -> List - return list(map(*args)) - - def map_none(*args): - # type: (...) -> List - return list(zip_longest(*args)) - - def unidecode(data): - # type: (AnyStr) -> AnyStr - return data - -else: - # --------- - # Python 2 - # --------- - import time - from lib.unidecode import unidecode as unicode_decode - # noinspection PyProtectedMember,PyDeprecation - from base64 import decodestring, encodestring - # noinspection PyDeprecation - b64decodebytes = decodestring - # noinspection PyDeprecation - b64encodebytes = encodestring - # noinspection PyUnresolvedReferences - from lib.backports.configparser import ConfigParser - # noinspection PyUnresolvedReferences - from lib.enum34 import Enum - # noinspection PyProtectedMember,PyUnresolvedReferences - from lib.scandir.scandir import scandir, GenericDirEntry as DirEntry - # noinspection PyUnresolvedReferences,PyDeprecation - from inspect import getargspec - - try: - # noinspection PyPep8Naming - import xml.etree.cElementTree as etree - except ImportError: - # noinspection PyPep8Naming - import xml.etree.ElementTree as etree - - from collections import OrderedDict - ordered_dict = OrderedDict - - def _totimestamp(dt=None): - # type: (datetime.datetime) -> float - """ This function should only be used in this module due to its 1970s+ limitation as that's all we need here and - sgdatatime can't be used at this module level - """ - return time.mktime(dt.timetuple()) - - native_timestamp = _totimestamp # type: Callable[[datetime.datetime], float] - - from subprocess import Popen as _Popen - - class Popen(_Popen): - - def __enter__(self): - return self - - def __exit__(self, *args, **kwargs): - for x in filter_iter(lambda y: y, [self.stdout, self.stderr, self.stdin]): - x.close() - self.wait() - - def unquote(string, encoding='utf-8', errors='replace'): - return decode_str(six_unquote(decode_str(string, encoding, errors)), 
encoding, errors) - - def unquote_plus(string, encoding='utf-8', errors='replace'): - return decode_str(six_unquote_plus(decode_str(string, encoding, errors)), encoding, errors) - - # noinspection PyUnusedLocal - def decode_bytes(d, encoding='utf-8', errors='replace'): - if not isinstance(d, binary_type): - return bytes(d) - return d - - def filter_list(*args): - # type: (...) -> List - # noinspection PyTypeChecker - return filter(*args) - - def list_items(d): - # type: (Dict) -> List[Tuple[Any, Any]] - # noinspection PyTypeChecker - return d.items() - - def list_keys(d): - # type: (Dict) -> List - # noinspection PyTypeChecker - return d.keys() - - def list_values(d): - # type: (Dict) -> List - # noinspection PyTypeChecker - return d.values() - - def map_list(*args): - # type: (...) -> List - # noinspection PyTypeChecker - return map(*args) - - def map_none(*args): - # type: (...) -> List - # noinspection PyTypeChecker - return map(None, *args) - - def unidecode(data): - # type: (AnyStr) -> AnyStr - # noinspection PyUnresolvedReferences - return isinstance(data, unicode) and unicode_decode(data) or data diff --git a/lib/api_tmdb/tmdb_api.py b/lib/api_tmdb/tmdb_api.py index c7db1dfc..2f3a8fad 100644 --- a/lib/api_tmdb/tmdb_api.py +++ b/lib/api_tmdb/tmdb_api.py @@ -21,7 +21,6 @@ from lib.tvinfo_base import CastList, PersonGenders, RoleTypes, \ from json_helper import json_dumps from sg_helpers import clean_data, get_url, iterate_chunk, try_int -from _23 import filter_list from six import iteritems # noinspection PyUnreachableCode @@ -682,12 +681,12 @@ class TmdbIndexer(TVInfoBase): season_cast_obj['id'] for season_cast_obj in season_data[season_obj[0]].get('cast') or []]) - for person_obj in sorted(filter_list(lambda a: a['id'] in main_cast_ids, - show_data['aggregate_credits']['cast'] or [])[:50], + for person_obj in sorted(list(filter(lambda a: a['id'] in main_cast_ids, + show_data['aggregate_credits']['cast'] or []))[:50], key=lambda c: (main_cast_ids.get(c['id'], 0) or 0, c['total_episode_count'], c['order'] * -1), reverse=True): - for character in sorted(filter_list(lambda b: b['credit_id'] in main_cast_credit_ids, - person_obj.get('roles', []) or []), + for character in sorted(list(filter(lambda b: b['credit_id'] in main_cast_credit_ids, + person_obj.get('roles', []) or [])), key=lambda c: c['episode_count'], reverse=True): character_obj = TVInfoCharacter( name=clean_data(character['character']), diff --git a/lib/api_tvdb/tvdb_api.py b/lib/api_tvdb/tvdb_api.py index 009b91d7..dc679ad5 100644 --- a/lib/api_tvdb/tvdb_api.py +++ b/lib/api_tvdb/tvdb_api.py @@ -39,7 +39,6 @@ from lib.tvinfo_base import CastList, TVInfoCharacter, CrewList, TVInfoPerson, R from .tvdb_exceptions import TvdbError, TvdbShownotfound, TvdbTokenexpired from .tvdb_ui import BaseUI, ConsoleUI -from _23 import filter_list, list_keys, list_values, map_list from six import integer_types, iteritems, PY2, string_types # noinspection PyUnreachableCode @@ -290,7 +289,7 @@ class Tvdb(TVInfoBase): 'nl': 'nld', 'no': 'nor', 'pl': 'pol', 'pt': 'pot', 'ru': 'rus', 'sk': 'slv', 'sv': 'swe', 'zh': 'zho', '_1': 'srp', } - self.config['valid_languages_3'] = list_values(self.config['langabbv_23']) + self.config['valid_languages_3'] = list(self.config['langabbv_23'].values()) # TheTvdb.com should be based around numeric language codes, # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16 @@ -358,7 +357,7 @@ class Tvdb(TVInfoBase): else: d_m = shows if d_m: - results = map_list(map_data, [d_m['data']]) + 
results = list(map(map_data, [d_m['data']])) if ids.get(TVINFO_TVDB_SLUG): cache_id_key = 's-id-%s-%s' % (TVINFO_TVDB, ids[TVINFO_TVDB_SLUG]) is_none, shows = self._get_cache_entry(cache_id_key) @@ -373,7 +372,7 @@ class Tvdb(TVInfoBase): if d_m: for r in d_m: if ids.get(TVINFO_TVDB_SLUG) == r['slug']: - results = map_list(map_data, [r]) + results = list(map(map_data, [r])) break if name: for n in ([name], name)[isinstance(name, list)]: @@ -390,7 +389,7 @@ class Tvdb(TVInfoBase): if r: if not isinstance(r, list): r = [r] - results.extend(map_list(map_data, r)) + results.extend(list(map(map_data, r))) seen = set() results = [seen.add(r['id']) or r for r in results if r['id'] not in seen] @@ -613,8 +612,8 @@ class Tvdb(TVInfoBase): # type: (int, Optional[str]) -> Optional[dict] results = self.search_tvs(sid, language=language) for cur_result in (isinstance(results, dict) and results.get('results') or []): - result = filter_list(lambda r: 'series' == r['type'] and sid == r['id'], - cur_result.get('nbHits') and cur_result.get('hits') or []) + result = list(filter(lambda r: 'series' == r['type'] and sid == r['id'], + cur_result.get('nbHits') and cur_result.get('hits') or [])) if 1 == len(result): result[0]['overview'] = self.clean_overview( result[0]['overviews'][self.config['langabbv_23'].get(language) or 'eng']) @@ -627,7 +626,7 @@ class Tvdb(TVInfoBase): # notify of new keys if ENV.get('SG_DEV_MODE'): - new_keys = set(list_keys(result[0])).difference({ + new_keys = set(list(result[0])).difference({ '_highlightResult', 'aliases', 'banner', 'fanart', 'firstaired', 'follower_count', 'id', 'image', 'is_tvdb_searchable', 'is_tvt_searchable', @@ -788,7 +787,7 @@ class Tvdb(TVInfoBase): series_found = self._getetsrc(self.config['url_search_series'], params=self.config['params_search_series'], language=self.config['language']) if series_found: - return list_values(series_found)[0] + return list(series_found.values())[0] except (BaseException, Exception): pass @@ -899,15 +898,15 @@ class Tvdb(TVInfoBase): try: for cur_result in (isinstance(results, dict) and results.get('results') or []): # sorts 'banners/images/missing/' to last before filter - people = filter_list( + people = list(filter( lambda r: 'person' == r['type'] and rc_clean.sub(name, '') == rc_clean.sub(r['name'], ''), cur_result.get('nbHits') and sorted(cur_result.get('hits'), - key=lambda x: len(x['image']), reverse=True) or []) + key=lambda x: len(x['image']), reverse=True) or [])) if ENV.get('SG_DEV_MODE'): for person in people: - new_keys = set(list_keys(person)).difference({ + new_keys = set(list(person)).difference({ '_highlightResult', 'banner', 'id', 'image', 'is_tvdb_searchable', 'is_tvt_searchable', 'name', 'objectID', 'people_birthdate', 'people_died', diff --git a/lib/api_tvmaze/tvmaze_api.py b/lib/api_tvmaze/tvmaze_api.py index 75400769..76cec287 100644 --- a/lib/api_tvmaze/tvmaze_api.py +++ b/lib/api_tvmaze/tvmaze_api.py @@ -27,7 +27,6 @@ from lib.tvinfo_base import TVInfoBase, TVInfoImage, TVInfoImageSize, TVInfoImag crew_type_names, TVInfoPerson, RoleTypes, TVInfoShow, TVInfoEpisode, TVInfoIDs, TVInfoNetwork, TVInfoSeason, \ PersonGenders, TVINFO_TVMAZE, TVINFO_TVDB, TVINFO_IMDB -from _23 import filter_iter from six import integer_types, iteritems, string_types # noinspection PyUnreachableCode @@ -683,7 +682,7 @@ class TvMaze(TVInfoBase): premieres = [] returning = [] rc_lang = re.compile('(?i)eng|jap') - for cur_show in filter_iter(lambda s: 1 == s.episode_number and ( + for cur_show in filter(lambda s: 1 == 
s.episode_number and ( None is s.show.language or rc_lang.search(s.show.language)), schedule): if 1 == cur_show.season_number: premieres += [cur_show] diff --git a/lib/enzyme/fourcc.py b/lib/enzyme/fourcc.py index 6f6cd324..a421443d 100644 --- a/lib/enzyme/fourcc.py +++ b/lib/enzyme/fourcc.py @@ -21,7 +21,7 @@ import string import re import struct from six import string_types, integer_types -from _23 import decode_str, list_items +from _23 import decode_str __all__ = ['resolve'] @@ -845,7 +845,7 @@ FOURCC = { } # make it fool prove -for code, value in list_items(FOURCC): +for code, value in list(FOURCC.items()): if not code.upper() in FOURCC: FOURCC[code.upper()] = value if code.endswith(' '): diff --git a/lib/rtorrent/__init__.py b/lib/rtorrent/__init__.py index 36544fec..e5c554e1 100644 --- a/lib/rtorrent/__init__.py +++ b/lib/rtorrent/__init__.py @@ -36,8 +36,6 @@ from .rpc import Method from .torrent import Torrent, methods as torrent_methods from .tracker import Tracker, methods as tracker_methods -from _23 import filter_iter, filter_list, map_list - __version__ = '0.2.10' __author__ = 'Chris Lucas' @@ -184,15 +182,16 @@ class RTorrent(object): @todo: add validity check for specified view """ self.torrents = [] - retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self), torrent_methods) + retriever_methods = list(filter(lambda m: m.is_retriever() and m.is_available(self), torrent_methods)) mc = rpc.Multicall(self) if self.method_exists('d.multicall2'): mc.add('d.multicall2', '', view, 'd.hash=', - *map_list(lambda m2: ((getattr(m2, 'aliases') or [''])[-1] or m2.rpc_call) + '=', retriever_methods)) + *list(map(lambda m2: ((getattr(m2, 'aliases') or [''])[-1] or m2.rpc_call) + '=', + retriever_methods))) else: mc.add('d.multicall', view, 'd.get_hash=', - *map_list(lambda m1: m1.rpc_call + '=', retriever_methods)) + *list(map(lambda m1: m1.rpc_call + '=', retriever_methods))) results = mc.call()[0] # only sent one call, only need first result @@ -240,7 +239,7 @@ class RTorrent(object): try: call, arg = x.split('=') method = rpc.find_method(call) - method_name = next(filter_iter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases)) + method_name = next(filter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases)) param += ['%s=%s' % (method_name, arg)] except (BaseException, Exception): pass @@ -267,7 +266,7 @@ class RTorrent(object): max_retries = 10 while max_retries: try: - t = next(filter_iter(lambda td: td.info_hash.upper() == info_hash, self.get_torrents())) + t = next(filter(lambda td: td.info_hash.upper() == info_hash, self.get_torrents())) break except (BaseException, Exception): time.sleep(self.request_interval) @@ -326,7 +325,7 @@ class RTorrent(object): if verify_load: while verify_retries: try: - t = next(filter_iter(lambda td: td.info_hash == info_hash, self.get_torrents())) + t = next(filter(lambda td: td.info_hash == info_hash, self.get_torrents())) break except (BaseException, Exception): time.sleep(self.request_interval) @@ -437,7 +436,7 @@ class RTorrent(object): method = rpc.find_method('d.get_local_id') result = True try: - func = next(filter_iter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases)) + func = next(filter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases)) getattr(self.get_connection(), func)(info_hash) except (BaseException, Exception): result = False @@ -466,7 +465,7 @@ class RTorrent(object): """ mc = rpc.Multicall(self) - for method in filter_iter(lambda 
m: m.is_retriever() and m.is_available(self), methods): + for method in filter(lambda m: m.is_retriever() and m.is_available(self), methods): mc.add(method) mc.call() diff --git a/lib/rtorrent/file.py b/lib/rtorrent/file.py index 6b8d38f6..a13d76ff 100644 --- a/lib/rtorrent/file.py +++ b/lib/rtorrent/file.py @@ -22,8 +22,6 @@ from . import rpc from .common import safe_repr from .rpc import Method -from _23 import filter_iter - class File(object): """Represents an individual file within a L{Torrent} instance.""" @@ -48,7 +46,7 @@ class File(object): """ mc = rpc.Multicall(self) - for method in filter_iter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods): + for method in filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods): mc.add(method, self.rpc_id) mc.call() diff --git a/lib/rtorrent/group.py b/lib/rtorrent/group.py index 82c8fdc0..c64884a6 100644 --- a/lib/rtorrent/group.py +++ b/lib/rtorrent/group.py @@ -21,8 +21,6 @@ from . import rpc from .rpc import Method -from _23 import filter_iter - class Group(object): __name__ = 'Group' @@ -72,7 +70,7 @@ class Group(object): def _get_method(self, *choices): try: - return next(filter_iter(lambda method: self._rt_obj.method_exists(method), choices)) + return next(filter(lambda method: self._rt_obj.method_exists(method), choices)) except (BaseException, Exception): pass diff --git a/lib/rtorrent/rpc/__init__.py b/lib/rtorrent/rpc/__init__.py index bda54fcd..abd824c2 100644 --- a/lib/rtorrent/rpc/__init__.py +++ b/lib/rtorrent/rpc/__init__.py @@ -27,8 +27,6 @@ import re import rtorrent -from _23 import filter_iter, map_list - def get_varname(rpc_call): """Transform rpc method into variable name. @@ -94,8 +92,8 @@ class Method(object): if rt_obj.get_client_version_tuple() >= self.min_version: try: - self.varname = get_varname(next(filter_iter(lambda f: rt_obj.method_exists(f), - (self.rpc_call,) + tuple(getattr(self, 'aliases', ''))))) + self.varname = get_varname(next(filter(lambda f: rt_obj.method_exists(f), + (self.rpc_call,) + tuple(getattr(self, 'aliases', ''))))) return True except (BaseException, Exception): pass @@ -162,7 +160,7 @@ class Multicall(object): getattr(xmc, rpc_call)(*args) try: - results = tuple(next(filter_iter(lambda x: isinstance(x, list), xmc().results))) + results = tuple(next(filter(lambda x: isinstance(x, list), xmc().results))) except (BaseException, Exception): return [[]] @@ -216,8 +214,8 @@ def find_method(rpc_call): """Return L{Method} instance associated with given RPC call""" try: rpc_call = rpc_call.lower() - return next(filter_iter(lambda m: rpc_call in map_list( - lambda n: n.lower(), [m.rpc_call] + list(getattr(m, 'aliases', []))), + return next(filter(lambda m: rpc_call in list(map( + lambda n: n.lower(), [m.rpc_call] + list(getattr(m, 'aliases', [])))), rtorrent.methods + rtorrent.torrent.methods + rtorrent.file.methods + rtorrent.tracker.methods + rtorrent.peer.methods)) except (BaseException, Exception): diff --git a/lib/rtorrent/torrent.py b/lib/rtorrent/torrent.py index e5574641..9b2e6ed9 100644 --- a/lib/rtorrent/torrent.py +++ b/lib/rtorrent/torrent.py @@ -25,8 +25,6 @@ from .peer import Peer, methods as peer_methods from .rpc import Method from .tracker import Tracker, methods as tracker_methods -from _23 import filter_iter, filter_list - class Torrent(object): """Represents an individual torrent within a L{RTorrent} instance.""" @@ -70,7 +68,7 @@ class Torrent(object): @note: also assigns return value to self.peers """ self.peers = [] - retriever_methods = 
filter_list(lambda m: m.is_retriever() and m.is_available(self._rt_obj), peer_methods) + retriever_methods = list(filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), peer_methods)) mc = rpc.Multicall(self) # need to leave 2nd arg empty (dunno why) @@ -97,7 +95,7 @@ class Torrent(object): @note: also assigns return value to self.trackers """ self.trackers = [] - retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self._rt_obj), tracker_methods) + retriever_methods = list(filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), tracker_methods)) mc = rpc.Multicall(self) # need to leave 2nd arg empty (dunno why) @@ -125,7 +123,7 @@ class Torrent(object): """ self.files = [] - retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self._rt_obj), file_methods) + retriever_methods = list(filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), file_methods)) mc = rpc.Multicall(self) # 2nd arg can be anything, but it'll return all files in torrent @@ -155,7 +153,7 @@ class Torrent(object): def _get_method(self, *choices): try: - return next(filter_iter(lambda method: self._rt_obj.method_exists(method), choices)) + return next(filter(lambda method: self._rt_obj.method_exists(method), choices)) except (BaseException, Exception): pass @@ -276,7 +274,7 @@ class Torrent(object): """ mc = rpc.Multicall(self) - for method in filter_iter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods): + for method in filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods): mc.add(method, self.rpc_id) mc.call() diff --git a/lib/rtorrent/tracker.py b/lib/rtorrent/tracker.py index 06904075..c1682476 100644 --- a/lib/rtorrent/tracker.py +++ b/lib/rtorrent/tracker.py @@ -22,8 +22,6 @@ from . import rpc from .common import safe_repr from .rpc import Method -from _23 import filter_iter - class Tracker(object): """Represents an individual tracker within a L{Torrent} instance.""" @@ -64,7 +62,7 @@ class Tracker(object): """ mc = rpc.Multicall(self) - for method in filter_iter(lambda fx: fx.is_retriever() and fx.is_available(self._rt_obj), methods): + for method in filter(lambda fx: fx.is_retriever() and fx.is_available(self._rt_obj), methods): mc.add(method, self.rpc_id) mc.call() diff --git a/lib/scandir/LICENSE.txt b/lib/scandir/LICENSE.txt deleted file mode 100644 index 0759f503..00000000 --- a/lib/scandir/LICENSE.txt +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012, Ben Hoyt -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -* Neither the name of Ben Hoyt nor the names of its contributors may be used -to endorse or promote products derived from this software without specific -prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/lib/scandir/__init__.py b/lib/scandir/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/scandir/scandir.py b/lib/scandir/scandir.py deleted file mode 100644 index 9b9d7e71..00000000 --- a/lib/scandir/scandir.py +++ /dev/null @@ -1,697 +0,0 @@ -"""scandir, a better directory iterator and faster os.walk(), now in the Python 3.5 stdlib - -scandir() is a generator version of os.listdir() that returns an -iterator over files in a directory, and also exposes the extra -information most OSes provide while iterating files in a directory -(such as type and stat information). - -This module also includes a version of os.walk() that uses scandir() -to speed it up significantly. - -See README.md or https://github.com/benhoyt/scandir for rationale and -docs, or read PEP 471 (https://www.python.org/dev/peps/pep-0471/) for -more details on its inclusion into Python 3.5 - -scandir is released under the new BSD 3-clause license. See -LICENSE.txt for the full license text. -""" - -from __future__ import division - -from errno import ENOENT -from os import listdir, lstat, stat, strerror -from os.path import join, islink -from stat import S_IFDIR, S_IFLNK, S_IFREG -import collections -import sys - -try: - import _scandir -except ImportError: - _scandir = None - -try: - import ctypes -except ImportError: - ctypes = None - - -if None is not _scandir and None is not ctypes and not getattr(_scandir, 'DirEntry', None): - import warnings - warnings.warn("scandir compiled _scandir C module is too old, using slow generic fallback") - _scandir = None -elif _scandir is None and ctypes is None: - import warnings - warnings.warn("scandir can't find the compiled _scandir C module or ctypes, using slow generic fallback") - -__version__ = '1.10.0' -__all__ = ['scandir', 'walk'] - -# Windows FILE_ATTRIBUTE constants for interpreting the -# FIND_DATA.dwFileAttributes member -FILE_ATTRIBUTE_ARCHIVE = 32 -FILE_ATTRIBUTE_COMPRESSED = 2048 -FILE_ATTRIBUTE_DEVICE = 64 -FILE_ATTRIBUTE_DIRECTORY = 16 -FILE_ATTRIBUTE_ENCRYPTED = 16384 -FILE_ATTRIBUTE_HIDDEN = 2 -FILE_ATTRIBUTE_INTEGRITY_STREAM = 32768 -FILE_ATTRIBUTE_NORMAL = 128 -FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192 -FILE_ATTRIBUTE_NO_SCRUB_DATA = 131072 -FILE_ATTRIBUTE_OFFLINE = 4096 -FILE_ATTRIBUTE_READONLY = 1 -FILE_ATTRIBUTE_REPARSE_POINT = 1024 -FILE_ATTRIBUTE_SPARSE_FILE = 512 -FILE_ATTRIBUTE_SYSTEM = 4 -FILE_ATTRIBUTE_TEMPORARY = 256 -FILE_ATTRIBUTE_VIRTUAL = 65536 - -IS_PY3 = sys.version_info >= (3, 0) - -if IS_PY3: - unicode = str # Because Python <= 3.2 doesn't have u'unicode' syntax - - -class GenericDirEntry(object): - __slots__ = ('name', '_stat', '_lstat', '_scandir_path', '_path') - - def __init__(self, scandir_path, name): - self._scandir_path = scandir_path - self.name = name - self._stat = None - self._lstat = None - self._path = None - - @property - def path(self): - if self._path is None: - self._path = join(self._scandir_path, self.name) - return self._path - - def stat(self, follow_symlinks=True): - if 
follow_symlinks: - if self._stat is None: - self._stat = stat(self.path) - return self._stat - else: - if self._lstat is None: - self._lstat = lstat(self.path) - return self._lstat - - # The code duplication below is intentional: this is for slightly - # better performance on systems that fall back to GenericDirEntry. - # It avoids an additional attribute lookup and method call, which - # are relatively slow on CPython. - def is_dir(self, follow_symlinks=True): - try: - st = self.stat(follow_symlinks=follow_symlinks) - except OSError as e: - if e.errno != ENOENT: - raise - return False # Path doesn't exist or is a broken symlink - return st.st_mode & 0o170000 == S_IFDIR - - def is_file(self, follow_symlinks=True): - try: - st = self.stat(follow_symlinks=follow_symlinks) - except OSError as e: - if e.errno != ENOENT: - raise - return False # Path doesn't exist or is a broken symlink - return st.st_mode & 0o170000 == S_IFREG - - def is_symlink(self): - try: - st = self.stat(follow_symlinks=False) - except OSError as e: - if e.errno != ENOENT: - raise - return False # Path doesn't exist or is a broken symlink - return st.st_mode & 0o170000 == S_IFLNK - - def inode(self): - st = self.stat(follow_symlinks=False) - return st.st_ino - - def __str__(self): - return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name) - - __repr__ = __str__ - - -def _scandir_generic(path=unicode('.')): - """Like os.listdir(), but yield DirEntry objects instead of returning - a list of names. - """ - for name in listdir(path): - yield GenericDirEntry(path, name) - - -if IS_PY3 and sys.platform == 'win32': - def scandir_generic(path=unicode('.')): - if isinstance(path, bytes): - raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead") - return _scandir_generic(path) - scandir_generic.__doc__ = _scandir_generic.__doc__ -else: - scandir_generic = _scandir_generic - - -scandir_c = None -scandir_python = None - - -if sys.platform == 'win32': - if ctypes is not None: - from ctypes import wintypes - - # Various constants from windows.h - INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value - ERROR_FILE_NOT_FOUND = 2 - ERROR_NO_MORE_FILES = 18 - IO_REPARSE_TAG_SYMLINK = 0xA000000C - - # Numer of seconds between 1601-01-01 and 1970-01-01 - SECONDS_BETWEEN_EPOCHS = 11644473600 - - kernel32 = ctypes.windll.kernel32 - - # ctypes wrappers for (wide string versions of) FindFirstFile, - # FindNextFile, and FindClose - FindFirstFile = kernel32.FindFirstFileW - FindFirstFile.argtypes = [ - wintypes.LPCWSTR, - ctypes.POINTER(wintypes.WIN32_FIND_DATAW), - ] - FindFirstFile.restype = wintypes.HANDLE - - FindNextFile = kernel32.FindNextFileW - FindNextFile.argtypes = [ - wintypes.HANDLE, - ctypes.POINTER(wintypes.WIN32_FIND_DATAW), - ] - FindNextFile.restype = wintypes.BOOL - - FindClose = kernel32.FindClose - FindClose.argtypes = [wintypes.HANDLE] - FindClose.restype = wintypes.BOOL - - Win32StatResult = collections.namedtuple('Win32StatResult', [ - 'st_mode', - 'st_ino', - 'st_dev', - 'st_nlink', - 'st_uid', - 'st_gid', - 'st_size', - 'st_atime', - 'st_mtime', - 'st_ctime', - 'st_atime_ns', - 'st_mtime_ns', - 'st_ctime_ns', - 'st_file_attributes', - ]) - - def filetime_to_time(filetime): - """Convert Win32 FILETIME to time since Unix epoch in seconds.""" - total = filetime.dwHighDateTime << 32 | filetime.dwLowDateTime - return total / 10000000 - SECONDS_BETWEEN_EPOCHS - - def find_data_to_stat(data): - """Convert Win32 FIND_DATA struct to stat_result.""" - # First convert Win32 dwFileAttributes to 
st_mode - attributes = data.dwFileAttributes - st_mode = 0 - if attributes & FILE_ATTRIBUTE_DIRECTORY: - st_mode |= S_IFDIR | 0o111 - else: - st_mode |= S_IFREG - if attributes & FILE_ATTRIBUTE_READONLY: - st_mode |= 0o444 - else: - st_mode |= 0o666 - if (attributes & FILE_ATTRIBUTE_REPARSE_POINT and - data.dwReserved0 == IO_REPARSE_TAG_SYMLINK): - st_mode ^= st_mode & 0o170000 - st_mode |= S_IFLNK - - st_size = data.nFileSizeHigh << 32 | data.nFileSizeLow - st_atime = filetime_to_time(data.ftLastAccessTime) - st_mtime = filetime_to_time(data.ftLastWriteTime) - st_ctime = filetime_to_time(data.ftCreationTime) - - # Some fields set to zero per CPython's posixmodule.c: st_ino, st_dev, - # st_nlink, st_uid, st_gid - return Win32StatResult(st_mode, 0, 0, 0, 0, 0, st_size, - st_atime, st_mtime, st_ctime, - int(st_atime * 1000000000), - int(st_mtime * 1000000000), - int(st_ctime * 1000000000), - attributes) - - class Win32DirEntryPython(object): - __slots__ = ('name', '_stat', '_lstat', '_find_data', '_scandir_path', '_path', '_inode') - - def __init__(self, scandir_path, name, find_data): - self._scandir_path = scandir_path - self.name = name - self._stat = None - self._lstat = None - self._find_data = find_data - self._path = None - self._inode = None - - @property - def path(self): - if self._path is None: - self._path = join(self._scandir_path, self.name) - return self._path - - def stat(self, follow_symlinks=True): - if follow_symlinks: - if self._stat is None: - if self.is_symlink(): - # It's a symlink, call link-following stat() - self._stat = stat(self.path) - else: - # Not a symlink, stat is same as lstat value - if self._lstat is None: - self._lstat = find_data_to_stat(self._find_data) - self._stat = self._lstat - return self._stat - else: - if self._lstat is None: - # Lazily convert to stat object, because it's slow - # in Python, and often we only need is_dir() etc - self._lstat = find_data_to_stat(self._find_data) - return self._lstat - - def is_dir(self, follow_symlinks=True): - is_symlink = self.is_symlink() - if follow_symlinks and is_symlink: - try: - return self.stat().st_mode & 0o170000 == S_IFDIR - except OSError as e: - if e.errno != ENOENT: - raise - return False - elif is_symlink: - return False - else: - return (self._find_data.dwFileAttributes & - FILE_ATTRIBUTE_DIRECTORY != 0) - - def is_file(self, follow_symlinks=True): - is_symlink = self.is_symlink() - if follow_symlinks and is_symlink: - try: - return self.stat().st_mode & 0o170000 == S_IFREG - except OSError as e: - if e.errno != ENOENT: - raise - return False - elif is_symlink: - return False - else: - return (self._find_data.dwFileAttributes & - FILE_ATTRIBUTE_DIRECTORY == 0) - - def is_symlink(self): - return (self._find_data.dwFileAttributes & - FILE_ATTRIBUTE_REPARSE_POINT != 0 and - self._find_data.dwReserved0 == IO_REPARSE_TAG_SYMLINK) - - def inode(self): - if self._inode is None: - self._inode = lstat(self.path).st_ino - return self._inode - - def __str__(self): - return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name) - - __repr__ = __str__ - - def win_error(error, filename): - exc = WindowsError(error, ctypes.FormatError(error)) - exc.filename = filename - return exc - - def _scandir_python(path=unicode('.')): - """Like os.listdir(), but yield DirEntry objects instead of returning - a list of names. 
- """ - # Call FindFirstFile and handle errors - if isinstance(path, bytes): - is_bytes = True - filename = join(path.decode('mbcs', 'strict'), '*.*') - else: - is_bytes = False - filename = join(path, '*.*') - data = wintypes.WIN32_FIND_DATAW() - data_p = ctypes.byref(data) - handle = FindFirstFile(filename, data_p) - if handle == INVALID_HANDLE_VALUE: - error = ctypes.GetLastError() - if error == ERROR_FILE_NOT_FOUND: - # No files, don't yield anything - return - raise win_error(error, path) - - # Call FindNextFile in a loop, stopping when no more files - try: - while True: - # Skip '.' and '..' (current and parent directory), but - # otherwise yield (filename, stat_result) tuple - name = data.cFileName - if name not in ('.', '..'): - if is_bytes: - name = name.encode('mbcs', 'replace') - yield Win32DirEntryPython(path, name, data) - - data = wintypes.WIN32_FIND_DATAW() - data_p = ctypes.byref(data) - success = FindNextFile(handle, data_p) - if not success: - error = ctypes.GetLastError() - if error == ERROR_NO_MORE_FILES: - break - raise win_error(error, path) - finally: - if not FindClose(handle): - raise win_error(ctypes.GetLastError(), path) - - if IS_PY3: - def scandir_python(path=unicode('.')): - if isinstance(path, bytes): - raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead") - return _scandir_python(path) - scandir_python.__doc__ = _scandir_python.__doc__ - else: - scandir_python = _scandir_python - - if _scandir is not None: - scandir_c = _scandir.scandir - DirEntry_c = _scandir.DirEntry - - if _scandir is not None: - scandir = scandir_c - DirEntry = DirEntry_c - elif ctypes is not None: - scandir = scandir_python - DirEntry = Win32DirEntryPython - else: - scandir = scandir_generic - DirEntry = GenericDirEntry - - -# Linux, OS X, and BSD implementation -elif sys.platform.startswith(('linux', 'darwin', 'sunos5')) or 'bsd' in sys.platform: - have_dirent_d_type = (sys.platform != 'sunos5') - - if ctypes is not None and have_dirent_d_type: - import ctypes.util - - DIR_p = ctypes.c_void_p - - # Rather annoying how the dirent struct is slightly different on each - # platform. The only fields we care about are d_name and d_type. 
- class Dirent(ctypes.Structure): - if sys.platform.startswith('linux'): - _fields_ = ( - ('d_ino', ctypes.c_ulong), - ('d_off', ctypes.c_long), - ('d_reclen', ctypes.c_ushort), - ('d_type', ctypes.c_byte), - ('d_name', ctypes.c_char * 256), - ) - elif 'openbsd' in sys.platform: - _fields_ = ( - ('d_ino', ctypes.c_uint64), - ('d_off', ctypes.c_uint64), - ('d_reclen', ctypes.c_uint16), - ('d_type', ctypes.c_uint8), - ('d_namlen', ctypes.c_uint8), - ('__d_padding', ctypes.c_uint8 * 4), - ('d_name', ctypes.c_char * 256), - ) - else: - _fields_ = ( - ('d_ino', ctypes.c_uint32), # must be uint32, not ulong - ('d_reclen', ctypes.c_ushort), - ('d_type', ctypes.c_byte), - ('d_namlen', ctypes.c_byte), - ('d_name', ctypes.c_char * 256), - ) - - DT_UNKNOWN = 0 - DT_DIR = 4 - DT_REG = 8 - DT_LNK = 10 - - Dirent_p = ctypes.POINTER(Dirent) - Dirent_pp = ctypes.POINTER(Dirent_p) - - libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) - opendir = libc.opendir - opendir.argtypes = [ctypes.c_char_p] - opendir.restype = DIR_p - - readdir_r = libc.readdir_r - readdir_r.argtypes = [DIR_p, Dirent_p, Dirent_pp] - readdir_r.restype = ctypes.c_int - - closedir = libc.closedir - closedir.argtypes = [DIR_p] - closedir.restype = ctypes.c_int - - file_system_encoding = sys.getfilesystemencoding() - - class PosixDirEntry(object): - __slots__ = ('name', '_d_type', '_stat', '_lstat', '_scandir_path', '_path', '_inode') - - def __init__(self, scandir_path, name, d_type, inode): - self._scandir_path = scandir_path - self.name = name - self._d_type = d_type - self._inode = inode - self._stat = None - self._lstat = None - self._path = None - - @property - def path(self): - if self._path is None: - self._path = join(self._scandir_path, self.name) - return self._path - - def stat(self, follow_symlinks=True): - if follow_symlinks: - if self._stat is None: - if self.is_symlink(): - self._stat = stat(self.path) - else: - if self._lstat is None: - self._lstat = lstat(self.path) - self._stat = self._lstat - return self._stat - else: - if self._lstat is None: - self._lstat = lstat(self.path) - return self._lstat - - def is_dir(self, follow_symlinks=True): - if (self._d_type == DT_UNKNOWN or - (follow_symlinks and self.is_symlink())): - try: - st = self.stat(follow_symlinks=follow_symlinks) - except OSError as e: - if e.errno != ENOENT: - raise - return False - return st.st_mode & 0o170000 == S_IFDIR - else: - return self._d_type == DT_DIR - - def is_file(self, follow_symlinks=True): - if (self._d_type == DT_UNKNOWN or - (follow_symlinks and self.is_symlink())): - try: - st = self.stat(follow_symlinks=follow_symlinks) - except OSError as e: - if e.errno != ENOENT: - raise - return False - return st.st_mode & 0o170000 == S_IFREG - else: - return self._d_type == DT_REG - - def is_symlink(self): - if self._d_type == DT_UNKNOWN: - try: - st = self.stat(follow_symlinks=False) - except OSError as e: - if e.errno != ENOENT: - raise - return False - return st.st_mode & 0o170000 == S_IFLNK - else: - return self._d_type == DT_LNK - - def inode(self): - return self._inode - - def __str__(self): - return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name) - - __repr__ = __str__ - - def posix_error(filename): - errno = ctypes.get_errno() - exc = OSError(errno, strerror(errno)) - exc.filename = filename - return exc - - def scandir_python(path=unicode('.')): - """Like os.listdir(), but yield DirEntry objects instead of returning - a list of names. 
- """ - if isinstance(path, bytes): - opendir_path = path - is_bytes = True - else: - opendir_path = path.encode(file_system_encoding) - is_bytes = False - dir_p = opendir(opendir_path) - if not dir_p: - raise posix_error(path) - try: - result = Dirent_p() - while True: - entry = Dirent() - if readdir_r(dir_p, entry, result): - raise posix_error(path) - if not result: - break - name = entry.d_name - if name not in (b'.', b'..'): - if not is_bytes: - name = name.decode(file_system_encoding) - yield PosixDirEntry(path, name, entry.d_type, entry.d_ino) - finally: - if closedir(dir_p): - raise posix_error(path) - - if _scandir is not None: - scandir_c = _scandir.scandir - DirEntry_c = _scandir.DirEntry - - if _scandir is not None: - scandir = scandir_c - DirEntry = DirEntry_c - elif ctypes is not None and have_dirent_d_type: - scandir = scandir_python - DirEntry = PosixDirEntry - else: - scandir = scandir_generic - DirEntry = GenericDirEntry - - -# Some other system -- no d_type or stat information -else: - scandir = scandir_generic - DirEntry = GenericDirEntry - - -def _walk(top, topdown=True, onerror=None, followlinks=False): - """Like Python 3.5's implementation of os.walk() -- faster than - the pre-Python 3.5 version as it uses scandir() internally. - """ - dirs = [] - nondirs = [] - - # We may not have read permission for top, in which case we can't - # get a list of the files the directory contains. os.walk - # always suppressed the exception then, rather than blow up for a - # minor reason when (say) a thousand readable directories are still - # left to visit. That logic is copied here. - try: - scandir_it = scandir(top) - except OSError as error: - if onerror is not None: - onerror(error) - return - - while True: - try: - try: - entry = next(scandir_it) - except StopIteration: - break - except OSError as error: - if onerror is not None: - onerror(error) - return - - try: - is_dir = entry.is_dir() - except OSError: - # If is_dir() raises an OSError, consider that the entry is not - # a directory, same behaviour than os.path.isdir(). - is_dir = False - - if is_dir: - dirs.append(entry.name) - else: - nondirs.append(entry.name) - - if not topdown and is_dir: - # Bottom-up: recurse into sub-directory, but exclude symlinks to - # directories if followlinks is False - if followlinks: - walk_into = True - else: - try: - is_symlink = entry.is_symlink() - except OSError: - # If is_symlink() raises an OSError, consider that the - # entry is not a symbolic link, same behaviour than - # os.path.islink(). - is_symlink = False - walk_into = not is_symlink - - if walk_into: - for entry in walk(entry.path, topdown, onerror, followlinks): - yield entry - - # Yield before recursion if going top down - if topdown: - yield top, dirs, nondirs - - # Recurse into sub-directories - for name in dirs: - new_path = join(top, name) - # Issue #23605: os.path.islink() is used instead of caching - # entry.is_symlink() result during the loop on os.scandir() because - # the caller can replace the directory entry during the "yield" - # above. 
- if followlinks or not islink(new_path): - for entry in walk(new_path, topdown, onerror, followlinks): - yield entry - else: - # Yield after recursion if going bottom up - yield top, dirs, nondirs - - -if IS_PY3 or sys.platform != 'win32': - walk = _walk -else: - # Fix for broken unicode handling on Windows on Python 2.x, see: - # https://github.com/benhoyt/scandir/issues/54 - file_system_encoding = sys.getfilesystemencoding() - - def walk(top, topdown=True, onerror=None, followlinks=False): - if isinstance(top, bytes): - top = top.decode(file_system_encoding) - return _walk(top, topdown, onerror, followlinks) diff --git a/lib/sg_futures/__init__.py b/lib/sg_futures/__init__.py index 2160ea97..76abd7b0 100644 --- a/lib/sg_futures/__init__.py +++ b/lib/sg_futures/__init__.py @@ -14,9 +14,5 @@ # # You should have received a copy of the GNU General Public License # along with SickGear. If not, see . -import sys -if 2 == sys.version_info[0]: - from .py2 import * -else: - from .py3 import * +from .py3 import * diff --git a/lib/sg_futures/base.py b/lib/sg_futures/base.py index 606bf195..e041c595 100644 --- a/lib/sg_futures/base.py +++ b/lib/sg_futures/base.py @@ -1,13 +1,8 @@ import re -import sys import threading -if 2 == sys.version_info[0]: - # noinspection PyProtectedMember - from .futures.thread import _WorkItem -else: - # noinspection PyCompatibility,PyProtectedMember - from concurrent.futures.thread import _WorkItem +# noinspection PyProtectedMember,PyUnresolvedReferences +from concurrent.futures.thread import _WorkItem class GenericWorkItem(_WorkItem): diff --git a/lib/sg_futures/futures/__init__.py b/lib/sg_futures/futures/__init__.py deleted file mode 100644 index e1c1545f..00000000 --- a/lib/sg_futures/futures/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Execute computations asynchronously using threads or processes.""" - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -from ._base import (FIRST_COMPLETED, - FIRST_EXCEPTION, - ALL_COMPLETED, - CancelledError, - TimeoutError, - Future, - Executor, - wait, - as_completed) -from .thread import ThreadPoolExecutor - -try: - from .process import ProcessPoolExecutor -except ImportError: - # some platforms don't have multiprocessing - pass diff --git a/lib/sg_futures/futures/_base.py b/lib/sg_futures/futures/_base.py deleted file mode 100644 index f7f525f6..00000000 --- a/lib/sg_futures/futures/_base.py +++ /dev/null @@ -1,673 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -import collections -import logging -import threading -import itertools -import time -import types - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -FIRST_COMPLETED = 'FIRST_COMPLETED' -FIRST_EXCEPTION = 'FIRST_EXCEPTION' -ALL_COMPLETED = 'ALL_COMPLETED' -_AS_COMPLETED = '_AS_COMPLETED' - -# Possible future states (for internal use by the futures package). -PENDING = 'PENDING' -RUNNING = 'RUNNING' -# The future was cancelled by the user... -CANCELLED = 'CANCELLED' -# ...and _Waiter.add_cancelled() was called by a worker. 
-CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' -FINISHED = 'FINISHED' - -_FUTURE_STATES = [ - PENDING, - RUNNING, - CANCELLED, - CANCELLED_AND_NOTIFIED, - FINISHED -] - -_STATE_TO_DESCRIPTION_MAP = { - PENDING: "pending", - RUNNING: "running", - CANCELLED: "cancelled", - CANCELLED_AND_NOTIFIED: "cancelled", - FINISHED: "finished" -} - -# Logger for internal use by the futures package. -LOGGER = logging.getLogger("concurrent.futures") - -class Error(Exception): - """Base class for all future-related exceptions.""" - pass - -class CancelledError(Error): - """The Future was cancelled.""" - pass - -class TimeoutError(Error): - """The operation exceeded the given deadline.""" - pass - -class _Waiter(object): - """Provides the event that wait() and as_completed() block on.""" - def __init__(self): - self.event = threading.Event() - self.finished_futures = [] - - def add_result(self, future): - self.finished_futures.append(future) - - def add_exception(self, future): - self.finished_futures.append(future) - - def add_cancelled(self, future): - self.finished_futures.append(future) - -class _AsCompletedWaiter(_Waiter): - """Used by as_completed().""" - - def __init__(self): - super(_AsCompletedWaiter, self).__init__() - self.lock = threading.Lock() - - def add_result(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_result(future) - self.event.set() - - def add_exception(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_exception(future) - self.event.set() - - def add_cancelled(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_cancelled(future) - self.event.set() - -class _FirstCompletedWaiter(_Waiter): - """Used by wait(return_when=FIRST_COMPLETED).""" - - def add_result(self, future): - super(_FirstCompletedWaiter, self).add_result(future) - self.event.set() - - def add_exception(self, future): - super(_FirstCompletedWaiter, self).add_exception(future) - self.event.set() - - def add_cancelled(self, future): - super(_FirstCompletedWaiter, self).add_cancelled(future) - self.event.set() - -class _AllCompletedWaiter(_Waiter): - """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" - - def __init__(self, num_pending_calls, stop_on_exception): - self.num_pending_calls = num_pending_calls - self.stop_on_exception = stop_on_exception - self.lock = threading.Lock() - super(_AllCompletedWaiter, self).__init__() - - def _decrement_pending_calls(self): - with self.lock: - self.num_pending_calls -= 1 - if not self.num_pending_calls: - self.event.set() - - def add_result(self, future): - super(_AllCompletedWaiter, self).add_result(future) - self._decrement_pending_calls() - - def add_exception(self, future): - super(_AllCompletedWaiter, self).add_exception(future) - if self.stop_on_exception: - self.event.set() - else: - self._decrement_pending_calls() - - def add_cancelled(self, future): - super(_AllCompletedWaiter, self).add_cancelled(future) - self._decrement_pending_calls() - -class _AcquireFutures(object): - """A context manager that does an ordered acquire of Future conditions.""" - - def __init__(self, futures): - self.futures = sorted(futures, key=id) - - def __enter__(self): - for future in self.futures: - future._condition.acquire() - - def __exit__(self, *args): - for future in self.futures: - future._condition.release() - -def _create_and_install_waiters(fs, return_when): - if return_when == _AS_COMPLETED: - waiter = _AsCompletedWaiter() - elif return_when == FIRST_COMPLETED: - waiter = _FirstCompletedWaiter() - else: - 
pending_count = sum( - f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) - - if return_when == FIRST_EXCEPTION: - waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) - elif return_when == ALL_COMPLETED: - waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) - else: - raise ValueError("Invalid return condition: %r" % return_when) - - for f in fs: - f._waiters.append(waiter) - - return waiter - - -def _yield_finished_futures(fs, waiter, ref_collect): - """ - Iterate on the list *fs*, yielding finished futures one by one in - reverse order. - Before yielding a future, *waiter* is removed from its waiters - and the future is removed from each set in the collection of sets - *ref_collect*. - - The aim of this function is to avoid keeping stale references after - the future is yielded and before the iterator resumes. - """ - while fs: - f = fs[-1] - for futures_set in ref_collect: - futures_set.remove(f) - with f._condition: - f._waiters.remove(waiter) - del f - # Careful not to keep a reference to the popped value - yield fs.pop() - - -def as_completed(fs, timeout=None): - """An iterator over the given futures that yields each as it completes. - - Args: - fs: The sequence of Futures (possibly created by different Executors) to - iterate over. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - - Returns: - An iterator that yields the given Futures as they complete (finished or - cancelled). If any given Futures are duplicated, they will be returned - once. - - Raises: - TimeoutError: If the entire result iterator could not be generated - before the given timeout. - """ - if timeout is not None: - end_time = timeout + time.time() - - fs = set(fs) - total_futures = len(fs) - with _AcquireFutures(fs): - finished = set( - f for f in fs - if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) - pending = fs - finished - waiter = _create_and_install_waiters(fs, _AS_COMPLETED) - finished = list(finished) - try: - for f in _yield_finished_futures(finished, waiter, - ref_collect=(fs,)): - f = [f] - yield f.pop() - - while pending: - if timeout is None: - wait_timeout = None - else: - wait_timeout = end_time - time.time() - if wait_timeout < 0: - raise TimeoutError( - '%d (of %d) futures unfinished' % ( - len(pending), total_futures)) - - waiter.event.wait(wait_timeout) - - with waiter.lock: - finished = waiter.finished_futures - waiter.finished_futures = [] - waiter.event.clear() - - # reverse to keep finishing order - finished.reverse() - for f in _yield_finished_futures(finished, waiter, - ref_collect=(fs, pending)): - f = [f] - yield f.pop() - - finally: - # Remove waiter from unfinished futures - for f in fs: - with f._condition: - f._waiters.remove(waiter) - -DoneAndNotDoneFutures = collections.namedtuple( - 'DoneAndNotDoneFutures', 'done not_done') -def wait(fs, timeout=None, return_when=ALL_COMPLETED): - """Wait for the futures in the given sequence to complete. - - Args: - fs: The sequence of Futures (possibly created by different Executors) to - wait upon. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - return_when: Indicates when this function should return. The options - are: - - FIRST_COMPLETED - Return when any future finishes or is - cancelled. - FIRST_EXCEPTION - Return when any future finishes by raising an - exception. If no future raises an exception - then it is equivalent to ALL_COMPLETED. 
- ALL_COMPLETED - Return when all futures finish or are cancelled. - - Returns: - A named 2-tuple of sets. The first set, named 'done', contains the - futures that completed (is finished or cancelled) before the wait - completed. The second set, named 'not_done', contains uncompleted - futures. - """ - with _AcquireFutures(fs): - done = set(f for f in fs - if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) - not_done = set(fs) - done - - if (return_when == FIRST_COMPLETED) and done: - return DoneAndNotDoneFutures(done, not_done) - elif (return_when == FIRST_EXCEPTION) and done: - if any(f for f in done - if not f.cancelled() and f.exception() is not None): - return DoneAndNotDoneFutures(done, not_done) - - if len(done) == len(fs): - return DoneAndNotDoneFutures(done, not_done) - - waiter = _create_and_install_waiters(fs, return_when) - - waiter.event.wait(timeout) - for f in fs: - with f._condition: - f._waiters.remove(waiter) - - done.update(waiter.finished_futures) - return DoneAndNotDoneFutures(done, set(fs) - done) - -class Future(object): - """Represents the result of an asynchronous computation.""" - - def __init__(self): - """Initializes the future. Should not be called by clients.""" - self._condition = threading.Condition() - self._state = PENDING - self._result = None - self._exception = None - self._traceback = None - self._waiters = [] - self._done_callbacks = [] - - def _invoke_callbacks(self): - for callback in self._done_callbacks: - try: - callback(self) - except Exception: - LOGGER.exception('exception calling callback for %r', self) - except BaseException: - # Explicitly let all other new-style exceptions through so - # that we can catch all old-style exceptions with a simple - # "except:" clause below. - # - # All old-style exception objects are instances of - # types.InstanceType, but "except types.InstanceType:" does - # not catch old-style exceptions for some reason. Thus, the - # only way to catch all old-style exceptions without catching - # any new-style exceptions is to filter out the new-style - # exceptions, which all derive from BaseException. - raise - except: - # Because of the BaseException clause above, this handler only - # executes for old-style exception objects. - LOGGER.exception('exception calling callback for %r', self) - - def __repr__(self): - with self._condition: - if self._state == FINISHED: - if self._exception: - return '<%s at %#x state=%s raised %s>' % ( - self.__class__.__name__, - id(self), - _STATE_TO_DESCRIPTION_MAP[self._state], - self._exception.__class__.__name__) - else: - return '<%s at %#x state=%s returned %s>' % ( - self.__class__.__name__, - id(self), - _STATE_TO_DESCRIPTION_MAP[self._state], - self._result.__class__.__name__) - return '<%s at %#x state=%s>' % ( - self.__class__.__name__, - id(self), - _STATE_TO_DESCRIPTION_MAP[self._state]) - - def cancel(self): - """Cancel the future if possible. - - Returns True if the future was cancelled, False otherwise. A future - cannot be cancelled if it is running or has already completed. 
- """ - with self._condition: - if self._state in [RUNNING, FINISHED]: - return False - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - return True - - self._state = CANCELLED - self._condition.notify_all() - - self._invoke_callbacks() - return True - - def cancelled(self): - """Return True if the future was cancelled.""" - with self._condition: - return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] - - def running(self): - """Return True if the future is currently executing.""" - with self._condition: - return self._state == RUNNING - - def done(self): - """Return True of the future was cancelled or finished executing.""" - with self._condition: - return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] - - def __get_result(self): - if self._exception: - if isinstance(self._exception, types.InstanceType): - # The exception is an instance of an old-style class, which - # means type(self._exception) returns types.ClassType instead - # of the exception's actual class type. - exception_type = self._exception.__class__ - else: - exception_type = type(self._exception) - raise exception_type, self._exception, self._traceback - else: - return self._result - - def add_done_callback(self, fn): - """Attaches a callable that will be called when the future finishes. - - Args: - fn: A callable that will be called with this future as its only - argument when the future completes or is cancelled. The callable - will always be called by a thread in the same process in which - it was added. If the future has already completed or been - cancelled then the callable will be called immediately. These - callables are called in the order that they were added. - """ - with self._condition: - if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: - self._done_callbacks.append(fn) - return - fn(self) - - def result(self, timeout=None): - """Return the result of the call that the future represents. - - Args: - timeout: The number of seconds to wait for the result if the future - isn't done. If None, then there is no limit on the wait time. - - Returns: - The result of the call that the future represents. - - Raises: - CancelledError: If the future was cancelled. - TimeoutError: If the future didn't finish executing before the given - timeout. - Exception: If the call raised then that exception will be raised. - """ - with self._condition: - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self.__get_result() - - self._condition.wait(timeout) - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self.__get_result() - else: - raise TimeoutError() - - def exception_info(self, timeout=None): - """Return a tuple of (exception, traceback) raised by the call that the - future represents. - - Args: - timeout: The number of seconds to wait for the exception if the - future isn't done. If None, then there is no limit on the wait - time. - - Returns: - The exception raised by the call that the future represents or None - if the call completed without raising. - - Raises: - CancelledError: If the future was cancelled. - TimeoutError: If the future didn't finish executing before the given - timeout. 
- """ - with self._condition: - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self._exception, self._traceback - - self._condition.wait(timeout) - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self._exception, self._traceback - else: - raise TimeoutError() - - def exception(self, timeout=None): - """Return the exception raised by the call that the future represents. - - Args: - timeout: The number of seconds to wait for the exception if the - future isn't done. If None, then there is no limit on the wait - time. - - Returns: - The exception raised by the call that the future represents or None - if the call completed without raising. - - Raises: - CancelledError: If the future was cancelled. - TimeoutError: If the future didn't finish executing before the given - timeout. - """ - return self.exception_info(timeout)[0] - - # The following methods should only be used by Executors and in tests. - def set_running_or_notify_cancel(self): - """Mark the future as running or process any cancel notifications. - - Should only be used by Executor implementations and unit tests. - - If the future has been cancelled (cancel() was called and returned - True) then any threads waiting on the future completing (though calls - to as_completed() or wait()) are notified and False is returned. - - If the future was not cancelled then it is put in the running state - (future calls to running() will return True) and True is returned. - - This method should be called by Executor implementations before - executing the work associated with this future. If this method returns - False then the work should not be executed. - - Returns: - False if the Future was cancelled, True otherwise. - - Raises: - RuntimeError: if this method was already called or if set_result() - or set_exception() was called. - """ - with self._condition: - if self._state == CANCELLED: - self._state = CANCELLED_AND_NOTIFIED - for waiter in self._waiters: - waiter.add_cancelled(self) - # self._condition.notify_all() is not necessary because - # self.cancel() triggers a notification. - return False - elif self._state == PENDING: - self._state = RUNNING - return True - else: - LOGGER.critical('Future %s in unexpected state: %s', - id(self), - self._state) - raise RuntimeError('Future in unexpected state') - - def set_result(self, result): - """Sets the return value of work associated with the future. - - Should only be used by Executor implementations and unit tests. - """ - with self._condition: - self._result = result - self._state = FINISHED - for waiter in self._waiters: - waiter.add_result(self) - self._condition.notify_all() - self._invoke_callbacks() - - def set_exception_info(self, exception, traceback): - """Sets the result of the future as being the given exception - and traceback. - - Should only be used by Executor implementations and unit tests. - """ - with self._condition: - self._exception = exception - self._traceback = traceback - self._state = FINISHED - for waiter in self._waiters: - waiter.add_exception(self) - self._condition.notify_all() - self._invoke_callbacks() - - def set_exception(self, exception): - """Sets the result of the future as being the given exception. - - Should only be used by Executor implementations and unit tests. 
- """ - self.set_exception_info(exception, None) - -class Executor(object): - """This is an abstract base class for concrete asynchronous executors.""" - - def submit(self, fn, *args, **kwargs): - """Submits a callable to be executed with the given arguments. - - Schedules the callable to be executed as fn(*args, **kwargs) and returns - a Future instance representing the execution of the callable. - - Returns: - A Future representing the given call. - """ - raise NotImplementedError() - - def map(self, fn, *iterables, **kwargs): - """Returns an iterator equivalent to map(fn, iter). - - Args: - fn: A callable that will take as many arguments as there are - passed iterables. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - - Returns: - An iterator equivalent to: map(func, *iterables) but the calls may - be evaluated out-of-order. - - Raises: - TimeoutError: If the entire result iterator could not be generated - before the given timeout. - Exception: If fn(*args) raises for any values. - """ - timeout = kwargs.get('timeout') - if timeout is not None: - end_time = timeout + time.time() - - fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)] - - # Yield must be hidden in closure so that the futures are submitted - # before the first iterator value is required. - def result_iterator(): - try: - # reverse to keep finishing order - fs.reverse() - while fs: - # Careful not to keep a reference to the popped future - if timeout is None: - yield fs.pop().result() - else: - yield fs.pop().result(end_time - time.time()) - finally: - for future in fs: - future.cancel() - return result_iterator() - - def shutdown(self, wait=True): - """Clean-up the resources associated with the Executor. - - It is safe to call this method several times. Otherwise, no other - methods can be called after this one. - - Args: - wait: If True then shutdown will not return until all running - futures have finished executing and the resources used by the - executor have been reclaimed. - """ - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.shutdown(wait=True) - return False - - -class BrokenExecutor(RuntimeError): - """ - Raised when a executor has become non-functional after a severe failure. - """ diff --git a/lib/sg_futures/futures/process.py b/lib/sg_futures/futures/process.py deleted file mode 100644 index 5ba8db85..00000000 --- a/lib/sg_futures/futures/process.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Implements ProcessPoolExecutor. - -The follow diagram and text describe the data-flow through the system: - -|======================= In-process =====================|== Out-of-process ==| - -+----------+ +----------+ +--------+ +-----------+ +---------+ -| | => | Work Ids | => | | => | Call Q | => | | -| | +----------+ | | +-----------+ | | -| | | ... | | | | ... | | | -| | | 6 | | | | 5, call() | | | -| | | 7 | | | | ... | | | -| Process | | ... | | Local | +-----------+ | Process | -| Pool | +----------+ | Worker | | #1..n | -| Executor | | Thread | | | -| | +----------- + | | +-----------+ | | -| | <=> | Work Items | <=> | | <= | Result Q | <= | | -| | +------------+ | | +-----------+ | | -| | | 6: call() | | | | ... | | | -| | | future | | | | 4, result | | | -| | | ... 
| | | | 3, except | | | -+----------+ +------------+ +--------+ +-----------+ +---------+ - -Executor.submit() called: -- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict -- adds the id of the _WorkItem to the "Work Ids" queue - -Local worker thread: -- reads work ids from the "Work Ids" queue and looks up the corresponding - WorkItem from the "Work Items" dict: if the work item has been cancelled then - it is simply removed from the dict, otherwise it is repackaged as a - _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" - until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because - calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). -- reads _ResultItems from "Result Q", updates the future stored in the - "Work Items" dict and deletes the dict entry - -Process #1..n: -- reads _CallItems from "Call Q", executes the calls, and puts the resulting - _ResultItems in "Request Q" -""" - -import atexit -from . import _base -import Queue as queue -import multiprocessing -import threading -import weakref -import sys - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -# Workers are created as daemon threads and processes. This is done to allow the -# interpreter to exit when there are still idle processes in a -# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, -# allowing workers to die with the interpreter has two undesirable properties: -# - The workers would still be running during interpretor shutdown, -# meaning that they would fail in unpredictable ways. -# - The workers could be killed while evaluating a work item, which could -# be bad if the callable being evaluated has external side-effects e.g. -# writing to a file. -# -# To work around this problem, an exit handler is installed which tells the -# workers to exit when their work queues are empty and then waits until the -# threads/processes finish. - -_threads_queues = weakref.WeakKeyDictionary() -_shutdown = False - -def _python_exit(): - global _shutdown - _shutdown = True - items = list(_threads_queues.items()) if _threads_queues else () - for t, q in items: - q.put(None) - for t, q in items: - t.join(sys.maxint) - -# Controls how many more calls than processes will be queued in the call queue. -# A smaller number will mean that processes spend more time idle waiting for -# work while a larger number will make Future.cancel() succeed less frequently -# (Futures in the call queue cannot be cancelled). -EXTRA_QUEUED_CALLS = 1 - -class _WorkItem(object): - def __init__(self, future, fn, args, kwargs): - self.future = future - self.fn = fn - self.args = args - self.kwargs = kwargs - -class _ResultItem(object): - def __init__(self, work_id, exception=None, result=None): - self.work_id = work_id - self.exception = exception - self.result = result - -class _CallItem(object): - def __init__(self, work_id, fn, args, kwargs): - self.work_id = work_id - self.fn = fn - self.args = args - self.kwargs = kwargs - -def _process_worker(call_queue, result_queue): - """Evaluates calls from call_queue and places the results in result_queue. - - This worker is run in a separate process. - - Args: - call_queue: A multiprocessing.Queue of _CallItems that will be read and - evaluated by the worker. - result_queue: A multiprocessing.Queue of _ResultItems that will written - to by the worker. - shutdown: A multiprocessing.Event that will be set as a signal to the - worker that it should exit when call_queue is empty. 
- """ - while True: - call_item = call_queue.get(block=True) - if call_item is None: - # Wake up queue management thread - result_queue.put(None) - return - try: - r = call_item.fn(*call_item.args, **call_item.kwargs) - except: - e = sys.exc_info()[1] - result_queue.put(_ResultItem(call_item.work_id, - exception=e)) - else: - result_queue.put(_ResultItem(call_item.work_id, - result=r)) - -def _add_call_item_to_queue(pending_work_items, - work_ids, - call_queue): - """Fills call_queue with _WorkItems from pending_work_items. - - This function never blocks. - - Args: - pending_work_items: A dict mapping work ids to _WorkItems e.g. - {5: <_WorkItem...>, 6: <_WorkItem...>, ...} - work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids - are consumed and the corresponding _WorkItems from - pending_work_items are transformed into _CallItems and put in - call_queue. - call_queue: A multiprocessing.Queue that will be filled with _CallItems - derived from _WorkItems. - """ - while True: - if call_queue.full(): - return - try: - work_id = work_ids.get(block=False) - except queue.Empty: - return - else: - work_item = pending_work_items[work_id] - - if work_item.future.set_running_or_notify_cancel(): - call_queue.put(_CallItem(work_id, - work_item.fn, - work_item.args, - work_item.kwargs), - block=True) - else: - del pending_work_items[work_id] - continue - -def _queue_management_worker(executor_reference, - processes, - pending_work_items, - work_ids_queue, - call_queue, - result_queue): - """Manages the communication between this process and the worker processes. - - This function is run in a local thread. - - Args: - executor_reference: A weakref.ref to the ProcessPoolExecutor that owns - this thread. Used to determine if the ProcessPoolExecutor has been - garbage collected and that this function can exit. - process: A list of the multiprocessing.Process instances used as - workers. - pending_work_items: A dict mapping work ids to _WorkItems e.g. - {5: <_WorkItem...>, 6: <_WorkItem...>, ...} - work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). - call_queue: A multiprocessing.Queue that will be filled with _CallItems - derived from _WorkItems for processing by the process workers. - result_queue: A multiprocessing.Queue of _ResultItems generated by the - process workers. - """ - nb_shutdown_processes = [0] - def shutdown_one_process(): - """Tell a worker to terminate, which will in turn wake us again""" - call_queue.put(None) - nb_shutdown_processes[0] += 1 - while True: - _add_call_item_to_queue(pending_work_items, - work_ids_queue, - call_queue) - - result_item = result_queue.get(block=True) - if result_item is not None: - work_item = pending_work_items[result_item.work_id] - del pending_work_items[result_item.work_id] - - if result_item.exception: - work_item.future.set_exception(result_item.exception) - else: - work_item.future.set_result(result_item.result) - # Delete references to object. See issue16284 - del work_item - # Check whether we should start shutting down. - executor = executor_reference() - # No more work items can be added if: - # - The interpreter is shutting down OR - # - The executor that owns this worker has been collected OR - # - The executor that owns this worker has been shutdown. - if _shutdown or executor is None or executor._shutdown_thread: - # Since no new work items can be added, it is safe to shutdown - # this thread if there are no pending work items. 
- if not pending_work_items: - while nb_shutdown_processes[0] < len(processes): - shutdown_one_process() - # If .join() is not called on the created processes then - # some multiprocessing.Queue methods may deadlock on Mac OS - # X. - for p in processes: - p.join() - call_queue.close() - return - del executor - -_system_limits_checked = False -_system_limited = None -def _check_system_limits(): - global _system_limits_checked, _system_limited - if _system_limits_checked: - if _system_limited: - raise NotImplementedError(_system_limited) - _system_limits_checked = True - try: - import os - nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") - except (AttributeError, ValueError): - # sysconf not available or setting not available - return - if nsems_max == -1: - # indetermine limit, assume that limit is determined - # by available memory only - return - if nsems_max >= 256: - # minimum number of semaphores available - # according to POSIX - return - _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max - raise NotImplementedError(_system_limited) - - -class ProcessPoolExecutor(_base.Executor): - def __init__(self, max_workers=None): - """Initializes a new ProcessPoolExecutor instance. - - Args: - max_workers: The maximum number of processes that can be used to - execute the given calls. If None or not given then as many - worker processes will be created as the machine has processors. - """ - _check_system_limits() - - if max_workers is None: - self._max_workers = multiprocessing.cpu_count() - else: - if max_workers <= 0: - raise ValueError("max_workers must be greater than 0") - - self._max_workers = max_workers - - # Make the call queue slightly larger than the number of processes to - # prevent the worker processes from idling. But don't make it too big - # because futures in the call queue cannot be cancelled. - self._call_queue = multiprocessing.Queue(self._max_workers + - EXTRA_QUEUED_CALLS) - self._result_queue = multiprocessing.Queue() - self._work_ids = queue.Queue() - self._queue_management_thread = None - self._processes = set() - - # Shutdown is a two-step process. - self._shutdown_thread = False - self._shutdown_lock = threading.Lock() - self._queue_count = 0 - self._pending_work_items = {} - - def _start_queue_management_thread(self): - # When the executor gets lost, the weakref callback will wake up - # the queue management thread. 
- def weakref_cb(_, q=self._result_queue): - q.put(None) - if self._queue_management_thread is None: - self._queue_management_thread = threading.Thread( - target=_queue_management_worker, - args=(weakref.ref(self, weakref_cb), - self._processes, - self._pending_work_items, - self._work_ids, - self._call_queue, - self._result_queue)) - self._queue_management_thread.daemon = True - self._queue_management_thread.start() - _threads_queues[self._queue_management_thread] = self._result_queue - - def _adjust_process_count(self): - for _ in range(len(self._processes), self._max_workers): - p = multiprocessing.Process( - target=_process_worker, - args=(self._call_queue, - self._result_queue)) - p.start() - self._processes.add(p) - - def submit(self, fn, *args, **kwargs): - with self._shutdown_lock: - if self._shutdown_thread: - raise RuntimeError('cannot schedule new futures after shutdown') - - f = _base.Future() - w = _WorkItem(f, fn, args, kwargs) - - self._pending_work_items[self._queue_count] = w - self._work_ids.put(self._queue_count) - self._queue_count += 1 - # Wake up queue management thread - self._result_queue.put(None) - - self._start_queue_management_thread() - self._adjust_process_count() - return f - submit.__doc__ = _base.Executor.submit.__doc__ - - def shutdown(self, wait=True): - with self._shutdown_lock: - self._shutdown_thread = True - if self._queue_management_thread: - # Wake up queue management thread - self._result_queue.put(None) - if wait: - self._queue_management_thread.join(sys.maxint) - # To reduce the risk of openning too many files, remove references to - # objects that use file descriptors. - self._queue_management_thread = None - self._call_queue = None - self._result_queue = None - self._processes = None - shutdown.__doc__ = _base.Executor.shutdown.__doc__ - -atexit.register(_python_exit) diff --git a/lib/sg_futures/futures/thread.py b/lib/sg_futures/futures/thread.py deleted file mode 100644 index f593de40..00000000 --- a/lib/sg_futures/futures/thread.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Implements ThreadPoolExecutor.""" - -import atexit -from six import PY2 -if PY2: - from . import _base -else: - from concurrent.futures import _base -import itertools -import Queue as queue -import threading -import weakref -import sys - -try: - from multiprocessing import cpu_count -except ImportError: - # some platforms don't have multiprocessing - def cpu_count(): - return None - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -# Workers are created as daemon threads. This is done to allow the interpreter -# to exit when there are still idle threads in a ThreadPoolExecutor's thread -# pool (i.e. shutdown() was not called). However, allowing workers to die with -# the interpreter has two undesirable properties: -# - The workers would still be running during interpretor shutdown, -# meaning that they would fail in unpredictable ways. -# - The workers could be killed while evaluating a work item, which could -# be bad if the callable being evaluated has external side-effects e.g. -# writing to a file. -# -# To work around this problem, an exit handler is installed which tells the -# workers to exit when their work queues are empty and then waits until the -# threads finish. 
- -_threads_queues = weakref.WeakKeyDictionary() -_shutdown = False - -def _python_exit(): - global _shutdown - _shutdown = True - items = list(_threads_queues.items()) if _threads_queues else () - for t, q in items: - q.put(None) - for t, q in items: - t.join(sys.maxint) - -atexit.register(_python_exit) - -class _WorkItem(object): - def __init__(self, future, fn, args, kwargs): - self.future = future - self.fn = fn - self.args = args - self.kwargs = kwargs - - def run(self): - if not self.future.set_running_or_notify_cancel(): - return - - try: - result = self.fn(*self.args, **self.kwargs) - except: - e, tb = sys.exc_info()[1:] - self.future.set_exception_info(e, tb) - else: - self.future.set_result(result) - -def _worker(executor_reference, work_queue, initializer, initargs): - if initializer is not None: - try: - initializer(*initargs) - except BaseException: - _base.LOGGER.critical('Exception in initializer:', exc_info=True) - executor = executor_reference() - if executor is not None: - executor._initializer_failed() - return - try: - while True: - work_item = work_queue.get(block=True) - if work_item is not None: - work_item.run() - # Delete references to object. See issue16284 - del work_item - - # attempt to increment idle count - executor = executor_reference() - if executor is not None: - executor._idle_semaphore.release() - del executor - continue - executor = executor_reference() - # Exit if: - # - The interpreter is shutting down OR - # - The executor that owns the worker has been collected OR - # - The executor that owns the worker has been shutdown. - if _shutdown or executor is None or executor._shutdown: - # Notice other workers - work_queue.put(None) - return - del executor - except: - _base.LOGGER.critical('Exception in worker', exc_info=True) - - -class BrokenThreadPool(_base.BrokenExecutor): - """ - Raised when a worker thread in a ThreadPoolExecutor failed initializing. - """ - - -class ThreadPoolExecutor(_base.Executor): - - # Used to assign unique thread names when thread_name_prefix is not supplied. - _counter = itertools.count().next - - def __init__(self, max_workers=None, thread_name_prefix='', initializer=None, initargs=()): - """Initializes a new ThreadPoolExecutor instance. - - Args: - max_workers: The maximum number of threads that can be used to - execute the given calls. - thread_name_prefix: An optional name prefix to give our threads. - """ - if max_workers is None: - # Use this number because ThreadPoolExecutor is often - # used to overlap I/O instead of CPU work. 
- max_workers = (cpu_count() or 1) * 5 - if max_workers <= 0: - raise ValueError("max_workers must be greater than 0") - - self._max_workers = max_workers - self._initializer = initializer - self._initargs = initargs - self._work_queue = queue.Queue() - self._idle_semaphore = threading.Semaphore(0) - self._threads = set() - self._broken = False - self._shutdown = False - self._shutdown_lock = threading.Lock() - self._thread_name_prefix = (thread_name_prefix or - ("ThreadPoolExecutor-%d" % self._counter())) - - def submit(self, fn, *args, **kwargs): - with self._shutdown_lock: - if self._broken: - raise BrokenThreadPool(self._broken) - if self._shutdown: - raise RuntimeError('cannot schedule new futures after shutdown') - - f = _base.Future() - w = _WorkItem(f, fn, args, kwargs) - - self._work_queue.put(w) - self._adjust_thread_count() - return f - submit.__doc__ = _base.Executor.submit.__doc__ - - def _adjust_thread_count(self): - # if idle threads are available, don't spin new threads - if self._idle_semaphore.acquire(False): - return - - # When the executor gets lost, the weakref callback will wake up - # the worker threads. - def weakref_cb(_, q=self._work_queue): - q.put(None) - - num_threads = len(self._threads) - if num_threads < self._max_workers: - thread_name = '%s_%d' % (self._thread_name_prefix or self, - num_threads) - t = threading.Thread(name=thread_name, target=_worker, - args=(weakref.ref(self, weakref_cb), - self._work_queue, self._initializer, self._initargs)) - t.daemon = True - t.start() - self._threads.add(t) - _threads_queues[t] = self._work_queue - - def _initializer_failed(self): - with self._shutdown_lock: - self._broken = ('A thread initializer failed, the thread pool ' - 'is not usable anymore') - # Drain work queue and mark pending futures failed - while True: - try: - work_item = self._work_queue.get_nowait() - except queue.Empty: - break - if work_item is not None: - work_item.future.set_exception(BrokenThreadPool(self._broken)) - - def shutdown(self, wait=True): - with self._shutdown_lock: - self._shutdown = True - self._work_queue.put(None) - if wait: - for t in self._threads: - t.join(sys.maxint) - shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/lib/sg_futures/py2.py b/lib/sg_futures/py2.py deleted file mode 100644 index 45974445..00000000 --- a/lib/sg_futures/py2.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding=utf-8 -# -# This file is part of SickGear. -# -# SickGear is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# SickGear is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with SickGear. If not, see . 
- -# noinspection PyUnresolvedReferences -import sys - -# noinspection PyProtectedMember -from .futures.thread import _base, BrokenThreadPool, ThreadPoolExecutor - -from .base import * - - -class SgWorkItem(GenericWorkItem): - - def run(self): - if self.future.set_running_or_notify_cancel(): - try: - self._set_thread_name() - result = self.fn(*self.args, **self.kwargs) - except (BaseException, Exception): - e, tb = sys.exc_info()[1:] - self.future.set_exception_info(e, tb) - else: - self.future.set_result(result) - - -class SgThreadPoolExecutor(ThreadPoolExecutor): - - def submit(self, fn, *args, **kwargs): - with self._shutdown_lock: - if self._broken: - raise BrokenThreadPool(self._broken) - if self._shutdown: - raise RuntimeError('cannot schedule new futures after shutdown') - - f = _base.Future() - w = SgWorkItem(f, fn, args, kwargs) - - self._work_queue.put(w) - self._adjust_thread_count() - return f diff --git a/lib/sg_helpers.py b/lib/sg_helpers.py index 4601b512..1b50aa5f 100644 --- a/lib/sg_helpers.py +++ b/lib/sg_helpers.py @@ -35,8 +35,8 @@ from send2trash import send2trash from encodingKludge import SYS_ENCODING import requests -from _23 import decode_bytes, filter_list, html_unescape, list_range, \ - ordered_dict, Popen, scandir, urlparse, urlsplit, urlunparse +from _23 import decode_bytes, html_unescape, list_range, \ + Popen, scandir, urlparse, urlsplit, urlunparse from six import integer_types, iteritems, iterkeys, itervalues, moves, PY2, string_types, text_type import zipfile @@ -810,8 +810,8 @@ def get_url(url, # type: AnyStr response_attr = ('text', 'content')[as_binary] # selectively mute some errors - mute = filter_list(lambda x: kwargs.pop(x, False), [ - 'mute_connect_err', 'mute_read_timeout', 'mute_connect_timeout', 'mute_http_error']) + mute = list(filter(lambda x: kwargs.pop(x, False), [ + 'mute_connect_err', 'mute_read_timeout', 'mute_connect_timeout', 'mute_http_error'])) # reuse or instantiate request session resp_sess = kwargs.pop('resp_sess', None) @@ -1617,12 +1617,12 @@ def ast_eval(value, default=None): return default if 'OrderedDict()' == value: - value = ordered_dict() + value = dict() elif 'OrderedDict([(' == value[0:14]: try: list_of_tuples = ast.literal_eval(value[12:-1]) - value = ordered_dict() + value = dict() for cur_tuple in list_of_tuples: value[cur_tuple[0]] = cur_tuple[1] except (BaseException, Exception): diff --git a/lib/tvinfo_base/base.py b/lib/tvinfo_base/base.py index a1d57bc4..7173aded 100644 --- a/lib/tvinfo_base/base.py +++ b/lib/tvinfo_base/base.py @@ -8,7 +8,6 @@ import time from exceptions_helper import ex from six import integer_types, iteritems, iterkeys, string_types, text_type -from _23 import list_items, list_values from lib.tvinfo_base.exceptions import * from sg_helpers import calc_age, make_path @@ -53,7 +52,7 @@ tv_src_names = { TVINFO_IMDB: 'imdb', TVINFO_TRAKT: 'trakt', TVINFO_TMDB: 'tmdb', - TVINFO_TVDB_SLUG : 'tvdb slug', + TVINFO_TVDB_SLUG: 'tvdb slug', TVINFO_TRAKT_SLUG: 'trakt slug', TVINFO_SLUG: 'generic slug', @@ -67,7 +66,7 @@ tv_src_names = { log = logging.getLogger('TVInfo') log.addHandler(logging.NullHandler()) -TVInfoShowContainer = {} # type: Dict[ShowContainer] +TVInfoShowContainer = {} # type: Dict[str, ShowContainer] class ShowContainer(dict): @@ -94,7 +93,7 @@ class ShowContainer(dict): if acquired_lock: try: current_time = time.time() - for k, v in list_items(self): + for k, v in list(self.items()): if self.max_age < current_time - v[1]: lock_acquired = self[k].lock.acquire(False) if lock_acquired: @@ 
-125,7 +124,7 @@ class TVInfoIDs(object): trakt=None, # type: integer_types rage=None, # type: integer_types ids=None # type: Dict[int, integer_types] - ): # type: (...) -> TVInfoIDs + ): ids = ids or {} self.tvdb = tvdb or ids.get(TVINFO_TVDB) self.tmdb = tmdb or ids.get(TVINFO_TMDB) @@ -156,7 +155,7 @@ class TVInfoIDs(object): class TVInfoSocialIDs(object): def __init__(self, twitter=None, instagram=None, facebook=None, wikipedia=None, ids=None): - # type: (str_int, str_int, str_int, str_int, Dict[int, str_int]) -> TVInfoSocialIDs + # type: (str_int, str_int, str_int, str_int, Dict[int, str_int]) -> None ids = ids or {} self.twitter = twitter or ids.get(TVINFO_TWITTER) self.instagram = instagram or ids.get(TVINFO_INSTAGRAM) @@ -231,7 +230,7 @@ class TVInfoImage(object): lang=None, height=None, width=None, aspect_ratio=None): self.img_id = img_id # type: Optional[integer_types] self.image_type = image_type # type: integer_types - self.sizes = sizes # type: Dict[TVInfoImageSize, AnyStr] + self.sizes = sizes # type: Dict[int, AnyStr] self.type_str = type_str # type: AnyStr self.main_image = main_image # type: bool self.rating = rating # type: Optional[Union[float, integer_types]] @@ -243,7 +242,7 @@ class TVInfoImage(object): def __str__(self): return '' % (TVInfoImageType.reverse_str.get(self.image_type, 'unknown'), - ', '.join(TVInfoImageSize.reverse_str.get(s, 'unkown') for s in self.sizes)) + ', '.join(TVInfoImageSize.reverse_str.get(s, 'unknown') for s in self.sizes)) __repr__ = __str__ @@ -409,7 +408,7 @@ class TVInfoShow(dict): match, and so on. """ results = [] - for cur_season in list_values(self): + for cur_season in self.values(): searchresult = cur_season.search(term=term, key=key) if 0 != len(searchresult): results.extend(searchresult) @@ -487,7 +486,7 @@ class TVInfoSeason(dict): instances. """ results = [] - for ep in list_values(self): + for ep in self.values(): searchresult = ep.search(term=term, key=key) if None is not searchresult: results.append(searchresult) @@ -679,7 +678,7 @@ class PersonBase(dict): ids=None, # type: Dict thumb_url=None, # type: AnyStr **kwargs # type: Dict - ): # type: (...) -> PersonBase + ): super(PersonBase, self).__init__(**kwargs) self.id = p_id # type: Optional[integer_types] self.name = name # type: Optional[AnyStr] @@ -769,7 +768,7 @@ class TVInfoPerson(PersonBase): real_name=None, # type: AnyStr akas=None, # type: Set[AnyStr] **kwargs # type: Dict - ): # type: (...) 
-> TVInfoPerson + ): super(TVInfoPerson, self).__init__( p_id=p_id, name=name, image=image, thumb_url=thumb_url, bio=bio, gender=gender, birthdate=birthdate, deathdate=deathdate, country=country, images=images, @@ -795,7 +794,7 @@ class TVInfoPerson(PersonBase): class TVInfoCharacter(PersonBase): def __init__(self, person=None, voice=None, plays_self=None, regular=None, show=None, start_year=None, end_year=None, **kwargs): - # type: (List[TVInfoPerson], bool, bool, bool, TVInfoShow, int, int, Dict) -> TVInfoCharacter + # type: (List[TVInfoPerson], bool, bool, bool, TVInfoShow, int, int, Dict) -> None super(TVInfoCharacter, self).__init__(**kwargs) self.person = person # type: List[TVInfoPerson] self.voice = voice # type: Optional[bool] diff --git a/sickgear/__init__.py b/sickgear/__init__.py index a0a0ed49..e827ddaf 100644 --- a/sickgear/__init__.py +++ b/sickgear/__init__.py @@ -55,8 +55,8 @@ from browser_ua import get_ua from configobj import ConfigObj from api_trakt import TraktAPI -from _23 import b64encodestring, decode_bytes, filter_iter, list_items, map_list, ordered_dict, scandir -from six import iteritems, PY2, string_types +from _23 import b64encodestring, decode_bytes, scandir +from six import iteritems, string_types import sg_helpers # noinspection PyUnreachableCode @@ -1353,10 +1353,10 @@ def init_stage_1(console_logging): EPISODE_VIEW_MISSED_RANGE = check_setting_int(CFG, 'GUI', 'episode_view_missed_range', 7) HISTORY_LAYOUT = check_setting_str(CFG, 'GUI', 'history_layout', 'detailed') - BROWSELIST_HIDDEN = map_list( + BROWSELIST_HIDDEN = list(map( lambda y: TVidProdid.glue in y and y or '%s%s%s' % ( (TVINFO_TVDB, TVINFO_IMDB)[bool(helpers.parse_imdb_id(y))], TVidProdid.glue, y), - [x.strip() for x in check_setting_str(CFG, 'GUI', 'browselist_hidden', '').split('|~|') if x.strip()]) + [x.strip() for x in check_setting_str(CFG, 'GUI', 'browselist_hidden', '').split('|~|') if x.strip()])) BROWSELIST_MRU = sg_helpers.ast_eval(check_setting_str(CFG, 'GUI', 'browselist_prefs', None), {}) BACKUP_DB_PATH = check_setting_str(CFG, 'Backup', 'backup_db_path', '') @@ -1450,7 +1450,7 @@ def init_stage_1(console_logging): setattr(nzb_prov, attr, check_setting_str(CFG, prov_id_uc, attr_check, default)) elif isinstance(default, int): setattr(nzb_prov, attr, check_setting_int(CFG, prov_id_uc, attr_check, default)) - for cur_provider in filter_iter(lambda p: abs(zlib.crc32(decode_bytes(p.name))) + 40000400 in ( + for cur_provider in filter(lambda p: abs(zlib.crc32(decode_bytes(p.name))) + 40000400 in ( 1449593765, 1597250020, 1524942228, 160758496, 2925374331 ) or (p.url and abs(zlib.crc32(decode_bytes(re.sub(r'[./]', '', p.url[-10:])))) + 40000400 in ( 2417143804,)), providers.sortedProviderList()): @@ -1505,24 +1505,6 @@ def init_stage_1(console_logging): pass logger.sb_log_instance.init_logging(console_logging=console_logging) - if PY2: - try: - import _scandir - except ImportError: - _scandir = None - - try: - import ctypes - except ImportError: - ctypes = None - - if None is not _scandir and None is not ctypes and not getattr(_scandir, 'DirEntry', None): - MODULE_UPDATE_STRING = \ - 'Your scandir binary module is outdated, using the slow but newer Python module.' \ - '
Upgrade the binary at a command prompt with' \ - ' # python -m pip install -U scandir' \ - '
Important: You must Shutdown SickGear before upgrading' - showList = [] showDict = {} @@ -1865,7 +1847,7 @@ def save_config(): # For passwords you must include the word `password` in the item_name and # add `helpers.encrypt(ITEM_NAME, ENCRYPTION_VERSION)` in save_config() - new_config['General'] = ordered_dict() + new_config['General'] = dict() s_z = check_setting_int(CFG, 'General', 'stack_size', 0) if s_z: new_config['General']['stack_size'] = s_z @@ -1927,7 +1909,8 @@ def save_config(): new_config['General']['flatten_folders_default'] = int(FLATTEN_FOLDERS_DEFAULT) new_config['General']['anime_default'] = int(ANIME_DEFAULT) new_config['General']['provider_order'] = ' '.join(PROVIDER_ORDER) - new_config['General']['provider_homes'] = '%s' % dict([(pid, v) for pid, v in list_items(PROVIDER_HOMES) if pid in [ + new_config['General']['provider_homes'] = '%s' % dict([(pid, v) for pid, v in list(PROVIDER_HOMES.items()) + if pid in [ p.get_id() for p in [x for x in providers.sortedProviderList() if GenericProvider.TORRENT == x.providerType]]]) new_config['General']['update_notify'] = int(UPDATE_NOTIFY) new_config['General']['update_auto'] = int(UPDATE_AUTO) @@ -2014,7 +1997,7 @@ def save_config(): new_config['Backup']['backup_db_max_count'] = BACKUP_DB_MAX_COUNT default_not_zero = ('enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog', 'use_after_get_data') - for src in filter_iter(lambda px: GenericProvider.TORRENT == px.providerType, providers.sortedProviderList()): + for src in filter(lambda px: GenericProvider.TORRENT == px.providerType, providers.sortedProviderList()): src_id = src.get_id() src_id_uc = src_id.upper() new_config[src_id_uc] = {} @@ -2052,19 +2035,19 @@ def save_config(): del new_config[src_id_uc] default_not_zero = ('enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog') - for src in filter_iter(lambda px: GenericProvider.NZB == px.providerType, providers.sortedProviderList()): + for src in filter(lambda px: GenericProvider.NZB == px.providerType, providers.sortedProviderList()): src_id = src.get_id() src_id_uc = src.get_id().upper() new_config[src_id_uc] = {} if int(src.enabled): new_config[src_id_uc][src_id] = int(src.enabled) - for attr in filter_iter(lambda _a: None is not getattr(src, _a, None), + for attr in filter(lambda _a: None is not getattr(src, _a, None), ('api_key', 'digest', 'username', 'search_mode')): if 'search_mode' != attr or 'eponly' != getattr(src, attr): new_config[src_id_uc]['%s_%s' % (src_id, attr)] = getattr(src, attr) - for attr in filter_iter(lambda _a: None is not getattr(src, _a, None), ( + for attr in filter(lambda _a: None is not getattr(src, _a, None), ( 'enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog', 'scene_only', 'scene_loose', 'scene_loose_active', 'scene_rej_nuked', 'scene_nuked_active', @@ -2280,7 +2263,7 @@ def save_config(): cfg_lc = cfg.lower() cfg_keys += [cfg] new_config[cfg] = {} - for (k, v) in filter_iter(lambda arg: any([arg[1]]) or ( + for (k, v) in filter(lambda arg: any([arg[1]]) or ( # allow saving where item value default is non-zero but 0 is a required setting value cfg_lc in ('kodi', 'xbmc', 'synoindex', 'nzbget', 'torrent', 'telegram') and arg[0] in ('always_on', 'priority', 'send_image')) @@ -2320,7 +2303,7 @@ def save_config(): new_config[notifier]['%s_notify_onsubtitledownload' % notifier.lower()] = int(onsubtitledownload) # remove empty stanzas - for k in filter_iter(lambda c: not new_config[c], cfg_keys): + for k in filter(lambda c: not new_config[c], cfg_keys): del 
new_config[k] new_config['Newznab'] = {} diff --git a/sickgear/_legacy.py b/sickgear/_legacy.py index 1727a4cb..cd6c2907 100644 --- a/sickgear/_legacy.py +++ b/sickgear/_legacy.py @@ -32,7 +32,7 @@ from tornado import gen from tornado.escape import utf8 from tornado.web import RequestHandler -from _23 import decode_str, filter_iter +from _23 import decode_str from six import iteritems from sg_futures import SgThreadPoolExecutor try: @@ -103,7 +103,7 @@ class LegacyBaseHandler(LegacyBase): def redirect_args(self, new_url, exclude=(None,), **kwargs): args = '&'.join(['%s=%s' % (k, v) for (k, v) in - filter_iter(lambda arg: arg[1] not in exclude, iteritems(kwargs))]) + filter(lambda arg: arg[1] not in exclude, iteritems(kwargs))]) self.redirect('%s%s' % (new_url, ('', '?' + args)[bool(args)]), permanent=True) """ deprecated from BaseHandler ------------------------------------------------------------------------------------ diff --git a/sickgear/classes.py b/sickgear/classes.py index 3cdecae0..5065e05a 100644 --- a/sickgear/classes.py +++ b/sickgear/classes.py @@ -25,7 +25,7 @@ import sickgear from ._legacy_classes import LegacySearchResult, LegacyProper from .common import Quality -from six import integer_types, iteritems, PY2, string_types +from six import integer_types, iteritems, string_types # noinspection PyUnreachableCode if False: @@ -359,41 +359,11 @@ class OrderedDefaultdict(OrderedDict): args = (self.default_factory,) if self.default_factory else () return self.__class__, args, None, None, iteritems(self) - if PY2: - # backport from python 3 - def move_to_end(self, key, last=True): - """Move an existing element to the end (or beginning if last==False). + def first_key(self): + return next(iter(self)) - Raises KeyError if the element does not exist. - When last=True, acts like a fast version of self[key]=self.pop(key). 
- - """ - link_prev, link_next, key = link = getattr(self, '_OrderedDict__map')[key] - link_prev[1] = link_next - link_next[0] = link_prev - root = getattr(self, '_OrderedDict__root') - if last: - last = root[0] - link[0] = last - link[1] = root - last[1] = root[0] = link - else: - first = root[1] - link[0] = root - link[1] = first - root[1] = first[0] = link - - def first_key(self): - return getattr(self, '_OrderedDict__root')[1][2] - - def last_key(self): - return getattr(self, '_OrderedDict__root')[0][2] - else: - def first_key(self): - return next(iter(self)) - - def last_key(self): - return next(reversed(self)) + def last_key(self): + return next(reversed(self)) class ImageUrlList(list): @@ -455,61 +425,14 @@ class EnvVar(object): pass def __getitem__(self, key): - return os.environ(key) + return os.environ[key] @staticmethod def get(key, default=None): return os.environ.get(key, default) -if not PY2: - sickgear.ENV = EnvVar() - -elif 'nt' == os.name: - from ctypes import windll, create_unicode_buffer - - # noinspection PyCompatibility - class WinEnvVar(EnvVar): - - @staticmethod - def get_environment_variable(name): - # noinspection PyUnresolvedReferences - name = unicode(name) # ensures string argument is unicode - n = windll.kernel32.GetEnvironmentVariableW(name, None, 0) - env_value = None - if n: - buf = create_unicode_buffer(u'\0' * n) - windll.kernel32.GetEnvironmentVariableW(name, buf, n) - env_value = buf.value - return env_value - - def __getitem__(self, key): - return self.get_environment_variable(key) - - def get(self, key, default=None): - r = self.get_environment_variable(key) - return r if None is not r else default - - sickgear.ENV = WinEnvVar() -else: - # noinspection PyCompatibility - class LinuxEnvVar(EnvVar): - # noinspection PyMissingConstructor - def __init__(self, environ): - self.environ = environ - - def __getitem__(self, key): - v = self.environ.get(key) - try: - return v if not isinstance(v, str) else v.decode(sickgear.SYS_ENCODING) - except (UnicodeDecodeError, UnicodeEncodeError): - return v - - def get(self, key, default=None): - v = self[key] - return v if None is not v else default - - sickgear.ENV = LinuxEnvVar(os.environ) +sickgear.ENV = EnvVar() # backport from python 3 diff --git a/sickgear/clients/download_station.py b/sickgear/clients/download_station.py index 3147ab58..62fc27ff 100644 --- a/sickgear/clients/download_station.py +++ b/sickgear/clients/download_station.py @@ -26,7 +26,7 @@ from .. 
import logger from ..sgdatetime import timestamp_near import sickgear -from _23 import filter_iter, filter_list, map_list, unquote_plus +from _23 import unquote_plus from six import string_types # noinspection PyUnreachableCode @@ -96,21 +96,21 @@ class DownloadStationAPI(GenericClient): id=t['id'], title=t['title'], total_size=t.get('size') or 0, added_ts=d.get('create_time'), last_completed_ts=d.get('completed_time'), last_started_ts=d.get('started_time'), seed_elapsed_secs=d.get('seedelapsed'), - wanted_size=sum(map_list(lambda tf: wanted(tf) and tf.get('size') or 0, f)) or None, - wanted_down=sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, f)) or None, + wanted_size=sum(list(map(lambda tf: wanted(tf) and tf.get('size') or 0, f))) or None, + wanted_down=sum(list(map(lambda tf: wanted(tf) and downloaded(tf) or 0, f))) or None, tally_down=downloaded(tx), tally_up=tx.get('size_uploaded'), - state='done' if re.search('finish', t['status']) else ('seed', 'down')[any(filter_list( - lambda tf: wanted(tf) and (downloaded(tf, -1) < tf.get('size', 0)), f))] + state='done' if re.search('finish', t['status']) else ('seed', 'down')[any(list(filter( + lambda tf: wanted(tf) and (downloaded(tf, -1) < tf.get('size', 0)), f)))] )) # only available during "download" and "seeding" file_list = (lambda t: t.get('additional', {}).get('file', {})) valid_stat = (lambda ti: not ti.get('error') and isinstance(ti.get('status'), string_types) - and sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti)))) - result = map_list(lambda t: base_state( + and sum(list(map(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti))))) + result = list(map(lambda t: base_state( t, t.get('additional', {}).get('detail', {}), t.get('additional', {}).get('transfer', {}), file_list(t)), - filter_list(lambda t: t['status'] in ('downloading', 'seeding', 'finished') and valid_stat(t), - tasks)) + list(filter(lambda t: t['status'] in ('downloading', 'seeding', 'finished') and valid_stat(t), + tasks)))) return result @@ -133,13 +133,13 @@ class DownloadStationAPI(GenericClient): t_params=dict(additional='detail,file,transfer'))['data']['tasks'] else: # noinspection PyUnresolvedReferences - tasks = (filter_list(lambda d: d.get('id') == rid, self._testdata), self._testdata)[not rid] + tasks = (list(filter(lambda d: d.get('id') == rid, self._testdata)), self._testdata)[not rid] result += tasks and (isinstance(tasks, list) and tasks or (isinstance(tasks, dict) and [tasks])) \ or ([], [{'error': True, 'id': rid}])[err] except (BaseException, Exception): if getinfo: result += [dict(error=True, id=rid)] - for t in filter_iter(lambda d: isinstance(d.get('title'), string_types) and d.get('title'), result): + for t in filter(lambda d: isinstance(d.get('title'), string_types) and d.get('title'), result): t['title'] = unquote_plus(t.get('title')) return result @@ -211,7 +211,7 @@ class DownloadStationAPI(GenericClient): :return: True if success, Id(s) that could not be acted upon, else Falsy if failure """ if isinstance(ids, (string_types, list)): - rids = ids if isinstance(ids, list) else map_list(lambda x: x.strip(), ids.split(',')) + rids = ids if isinstance(ids, list) else list(map(lambda x: x.strip(), ids.split(','))) result = pause_first and self._pause_torrent(rids) # get items not paused result = (isinstance(result, list) and result or []) @@ -225,7 +225,7 @@ class DownloadStationAPI(GenericClient): if isinstance(ids, (string_types, list)): item = dict(fail=[], ignore=[]) - for task in 
filter_iter(filter_func, self._tinf(ids, err=True)): + for task in filter(filter_func, self._tinf(ids, err=True)): item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('id')] # retry items not acted on @@ -237,7 +237,7 @@ class DownloadStationAPI(GenericClient): logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG) time.sleep(i) item['fail'] = [] - for task in filter_iter(filter_func, self._tinf(retry_ids, err=True)): + for task in filter(filter_func, self._tinf(retry_ids, err=True)): item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('id')] if not item['fail']: @@ -303,7 +303,7 @@ class DownloadStationAPI(GenericClient): # noinspection PyUnresolvedReferences if response and response.get('success'): for s in (1, 3, 5, 10, 15, 30, 60): - tasks = filter_list(lambda t: task_stamp <= t['additional']['detail']['create_time'], self._tinf()) + tasks = list(filter(lambda t: task_stamp <= t['additional']['detail']['create_time'], self._tinf())) try: return str(self._client_has(tasks, uri, files)[0].get('id')) except IndexError: @@ -324,8 +324,8 @@ class DownloadStationAPI(GenericClient): if uri or files: u = isinstance(uri, dict) and (uri.get('uri', '') or '').lower() or None f = isinstance(files, dict) and (files.get('file', [''])[0]).lower() or None - result = filter_list(lambda t: u and t['additional']['detail']['uri'].lower() == u - or f and t['additional']['detail']['uri'].lower() in f, tasks) + result = list(filter(lambda t: u and t['additional']['detail']['uri'].lower() == u + or f and t['additional']['detail']['uri'].lower() in f, tasks)) return result def _client_request(self, method, t_id=None, t_params=None, files=None): @@ -360,7 +360,7 @@ class DownloadStationAPI(GenericClient): return self._error_task(response) if None is not t_id and None is t_params and 'create' != method: - return filter_list(lambda r: r.get('error'), response.get('data', {})) or True + return list(filter(lambda r: r.get('error'), response.get('data', {}))) or True return response diff --git a/sickgear/clients/qbittorrent.py b/sickgear/clients/qbittorrent.py index f0aa5ebd..20b7690c 100644 --- a/sickgear/clients/qbittorrent.py +++ b/sickgear/clients/qbittorrent.py @@ -26,7 +26,7 @@ import sickgear from requests.exceptions import HTTPError -from _23 import filter_iter, filter_list, map_list, unquote_plus +from _23 import unquote_plus from six import string_types # noinspection PyUnreachableCode @@ -58,9 +58,9 @@ class QbittorrentAPI(GenericClient): id=t['hash'], title=t['name'], total_size=gp.get('total_size') or 0, added_ts=gp.get('addition_date'), last_completed_ts=gp.get('completion_date'), last_started_ts=None, seed_elapsed_secs=gp.get('seeding_time'), - wanted_size=sum(map_list(lambda tf: wanted(tf) and tf.get('size') or 0, f)) or None, - wanted_down=sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, f)) or None, - tally_down=sum(map_list(lambda tf: downloaded(tf) or 0, f)) or None, + wanted_size=sum(list(map(lambda tf: wanted(tf) and tf.get('size') or 0, f))) or None, + wanted_down=sum(list(map(lambda tf: wanted(tf) and downloaded(tf) or 0, f))) or None, + tally_down=sum(list(map(lambda tf: downloaded(tf) or 0, f))) or None, tally_up=gp.get('total_uploaded'), state='done' if 'pausedUP' == t.get('state') else ('down', 'seed')['up' in t.get('state').lower()] )) @@ -68,10 +68,10 @@ class QbittorrentAPI(GenericClient): ('torrents/files', 'query/propertiesFiles/%s' % ti['hash'])[not self.api_ns], params=({'hash': ti['hash']}, {})[not 
self.api_ns], json=True) or {}) valid_stat = (lambda ti: not self._ignore_state(ti) - and sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti)))) - result = map_list(lambda t: base_state(t, self._tinf(t['hash'])[0], file_list(t)), - filter_list(lambda t: re.search('(?i)queue|stall|(up|down)load|pausedUP', t['state']) and - valid_stat(t), self._tinf(ids, False))) + and sum(list(map(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti))))) + result = list(map(lambda t: base_state(t, self._tinf(t['hash'])[0], file_list(t)), + list(filter(lambda t: re.search('(?i)queue|stall|(up|down)load|pausedUP', t['state']) and + valid_stat(t), self._tinf(ids, False))))) return result @@ -109,8 +109,7 @@ class QbittorrentAPI(GenericClient): except (BaseException, Exception): if getinfo: result += [dict(error=True, id=rid)] - for t in filter_iter(lambda d: isinstance(d.get('name'), string_types) and d.get('name'), - (result, [])[getinfo]): + for t in filter(lambda d: isinstance(d.get('name'), string_types) and d.get('name'), (result, [])[getinfo]): t['name'] = unquote_plus(t.get('name')) return result @@ -290,7 +289,7 @@ class QbittorrentAPI(GenericClient): :return: True if success, Id(s) that could not be acted upon, else Falsy if failure """ if isinstance(ids, (string_types, list)): - rids = ids if isinstance(ids, list) else map_list(lambda x: x.strip(), ids.split(',')) + rids = ids if isinstance(ids, list) else list(map(lambda x: x.strip(), ids.split(','))) result = pause_first and self._pause_torrent(rids) # get items not paused result = (isinstance(result, list) and result or []) @@ -304,7 +303,7 @@ class QbittorrentAPI(GenericClient): if isinstance(ids, (string_types, list)): item = dict(fail=[], ignore=[]) - for task in filter_iter(filter_func, self._tinf(ids, use_props=False, err=True)): + for task in filter(filter_func, self._tinf(ids, use_props=False, err=True)): item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('hash')] # retry items that are not acted on @@ -316,7 +315,7 @@ class QbittorrentAPI(GenericClient): logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG) time.sleep(i) item['fail'] = [] - for task in filter_iter(filter_func, self._tinf(retry_ids, use_props=False, err=True)): + for task in filter(filter_func, self._tinf(retry_ids, use_props=False, err=True)): item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('hash')] if not item['fail']: @@ -378,7 +377,7 @@ class QbittorrentAPI(GenericClient): if True is response: for s in (1, 3, 5, 10, 15, 30, 60): - if filter_list(lambda t: task_stamp <= t['addition_date'], self._tinf(data.hash)): + if list(filter(lambda t: task_stamp <= t['addition_date'], self._tinf(data.hash))): return data.hash time.sleep(s) return True diff --git a/sickgear/common.py b/sickgear/common.py index 804fee6b..9ad5f3ef 100644 --- a/sickgear/common.py +++ b/sickgear/common.py @@ -25,7 +25,6 @@ import uuid import sickgear -from _23 import map_list from six import integer_types, iterkeys, string_types # noinspection PyUnresolvedReferences @@ -563,8 +562,8 @@ for (attr_name, qual_val) in [ ('SNATCHED', SNATCHED), ('SNATCHED_PROPER', SNATCHED_PROPER), ('SNATCHED_BEST', SNATCHED_BEST), ('DOWNLOADED', DOWNLOADED), ('ARCHIVED', ARCHIVED), ('FAILED', FAILED), ]: - setattr(Quality, attr_name, map_list(lambda qk: Quality.compositeStatus(qual_val, qk), - iterkeys(Quality.qualityStrings))) + setattr(Quality, attr_name, list(map(lambda qk: Quality.compositeStatus(qual_val, qk), + 
iterkeys(Quality.qualityStrings)))) Quality.SNATCHED_ANY = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], []) diff --git a/sickgear/config.py b/sickgear/config.py index c98df792..39a497c3 100644 --- a/sickgear/config.py +++ b/sickgear/config.py @@ -23,7 +23,7 @@ import sickgear.providers from . import db, helpers, logger, naming from lib.api_trakt import TraktAPI -from _23 import filter_list, urlsplit, urlunsplit +from _23 import urlsplit, urlunsplit from six import string_types @@ -831,7 +831,7 @@ class ConfigMigrator(object): # Migration v15: Transmithe.net variables def _migrate_v15(self): try: - neb = filter_list(lambda p: 'Nebulance' in p.name, sickgear.providers.sortedProviderList())[0] + neb = list(filter(lambda p: 'Nebulance' in p.name, sickgear.providers.sortedProviderList()))[0] except (BaseException, Exception): return # get the old settings from the file and store them in the new variable names diff --git a/sickgear/db.py b/sickgear/db.py index b9ee5a4e..bce8ed81 100644 --- a/sickgear/db.py +++ b/sickgear/db.py @@ -32,7 +32,7 @@ from .sgdatetime import timestamp_near from sg_helpers import make_path, compress_file, remove_file_perm, scantree -from _23 import filter_iter, filter_list, list_values, scandir +from _23 import scandir from six import iterkeys, iteritems, itervalues # noinspection PyUnreachableCode @@ -80,12 +80,12 @@ def mass_upsert_sql(table_name, value_dict, key_dict, sanitise=True): # sanity: remove k, v pairs in keyDict from valueDict if sanitise: - value_dict = dict(filter_iter(lambda k: k[0] not in key_dict, iteritems(value_dict))) + value_dict = dict(filter(lambda k: k[0] not in key_dict, iteritems(value_dict))) # noinspection SqlResolve cl.append(['UPDATE [%s] SET %s WHERE %s' % (table_name, ', '.join(gen_params(value_dict)), ' AND '.join(gen_params(key_dict))), - list_values(value_dict) + list_values(key_dict)]) + list(value_dict.values()) + list(key_dict.values())]) # noinspection SqlResolve cl.append(['INSERT INTO [' + table_name + '] (' + @@ -304,14 +304,14 @@ class DBConnection(object): query = 'UPDATE [%s] SET %s WHERE %s' % ( table_name, ', '.join(gen_params(value_dict)), ' AND '.join(gen_params(key_dict))) - self.action(query, list_values(value_dict) + list_values(key_dict)) + self.action(query, list(value_dict.values()) + list(key_dict.values())) if self.connection.total_changes == changes_before: # noinspection SqlResolve query = 'INSERT INTO [' + table_name + ']' \ + ' (%s)' % ', '.join(itertools.chain(iterkeys(value_dict), iterkeys(key_dict))) \ + ' VALUES (%s)' % ', '.join(['?'] * (len(value_dict) + len(key_dict))) - self.action(query, list_values(value_dict) + list_values(key_dict)) + self.action(query, list(value_dict.values()) + list(key_dict.values())) def tableInfo(self, table_name): # type: (AnyStr) -> Dict[AnyStr, Dict[AnyStr, AnyStr]] @@ -544,7 +544,7 @@ class SchemaUpgrade(object): # get old table columns and store the ones we want to keep result = self.connection.select('pragma table_info([%s])' % table) columns_list = ([column], column)[isinstance(column, list)] - keptColumns = filter_list(lambda col: col['name'] not in columns_list, result) + keptColumns = list(filter(lambda col: col['name'] not in columns_list, result)) keptColumnsNames = [] final = [] @@ -759,9 +759,9 @@ def cleanup_old_db_backups(filename): d, filename = os.path.split(filename) if not d: d = sickgear.DATA_DIR - for f in filter_iter(lambda fn: fn.is_file() and filename in fn.name and - 
re.search(r'\.db(\.v\d+)?\.r\d+$', fn.name), - scandir(d)): + for f in filter(lambda fn: fn.is_file() and filename in fn.name and + re.search(r'\.db(\.v\d+)?\.r\d+$', fn.name), + scandir(d)): try: os.unlink(f.path) except (BaseException, Exception): diff --git a/sickgear/failed_history.py b/sickgear/failed_history.py index 5af91a6d..0989c0ed 100644 --- a/sickgear/failed_history.py +++ b/sickgear/failed_history.py @@ -25,7 +25,6 @@ from .history import dateFormat from exceptions_helper import EpisodeNotFoundException, ex from _23 import unquote -from six import PY2, text_type # noinspection PyUnresolvedReferences # noinspection PyUnreachableCode @@ -83,10 +82,6 @@ def prepare_failed_name(release): fixed = re.sub(r'[.\-+ ]', '_', fixed) - # noinspection PyUnresolvedReferences - if PY2 and not isinstance(fixed, unicode): - fixed = text_type(fixed, 'utf-8', 'replace') - return fixed diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 58dd3562..9a8b9db6 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -43,8 +43,9 @@ import requests import requests.exceptions import subliminal from lxml_etree import etree, is_lxml +from base64 import decodebytes as b64decodebytes, encodebytes as b64encodebytes -from _23 import b64decodebytes, b64encodebytes, decode_bytes, decode_str, filter_iter, scandir +from _23 import decode_bytes, decode_str, scandir from six import iteritems, string_types, text_type # noinspection PyUnresolvedReferences from six.moves import zip @@ -1317,7 +1318,7 @@ def has_anime(): :rtype: bool """ # noinspection PyTypeChecker - return False if not sickgear.showList else any(filter_iter(lambda show: show.is_anime, sickgear.showList)) + return False if not sickgear.showList else any(filter(lambda show: show.is_anime, sickgear.showList)) def cpu_sleep(): @@ -1682,7 +1683,7 @@ def upgrade_new_naming(): (d_entry.path, new_dir_name, repr(e), ex(e)), logger.WARNING) if os.path.isdir(new_dir_name): try: - f_n = filter_iter(lambda fn: fn.is_file(), scandir(new_dir_name)) + f_n = filter(lambda fn: fn.is_file(), scandir(new_dir_name)) except OSError as e: logger.log('Unable to rename %s / %s' % (repr(e), ex(e)), logger.WARNING) diff --git a/sickgear/history.py b/sickgear/history.py index 844088c5..49d2f68e 100644 --- a/sickgear/history.py +++ b/sickgear/history.py @@ -22,8 +22,6 @@ from .common import FAILED, SNATCHED, SNATCHED_PROPER, SUBTITLED, Quality from .name_parser.parser import NameParser import sickgear -from six import PY2, text_type - # noinspection PyUnreachableCode if False: from typing import Any, AnyStr @@ -47,9 +45,6 @@ def _log_history_item(action, tvid, prodid, season, episode, quality, resource, """ log_date = datetime.datetime.now().strftime(dateFormat) - if PY2 and not isinstance(resource, text_type): - resource = text_type(resource, 'utf-8', 'replace') - my_db = db.DBConnection() my_db.action( 'INSERT INTO history' diff --git a/sickgear/indexermapper.py b/sickgear/indexermapper.py index b5eafcf8..26e70480 100644 --- a/sickgear/indexermapper.py +++ b/sickgear/indexermapper.py @@ -26,8 +26,7 @@ import sickgear from lib.dateutil.parser import parse -from _23 import unidecode -from six import iteritems, moves, string_types, PY2 +from six import iteritems, moves, string_types # noinspection PyUnreachableCode if False: @@ -178,9 +177,7 @@ def clean_show_name(showname): :return: :rtype: AnyStr """ - if not PY2: - return re.sub(r'[(\s]*(?:19|20)\d\d[)\s]*$', '', showname) - return re.sub(r'[(\s]*(?:19|20)\d\d[)\s]*$', '', unidecode(showname)) + return 
re.sub(r'[(\s]*(?:19|20)\d\d[)\s]*$', '', showname) def get_show_name_date(show_obj): diff --git a/sickgear/indexers/indexer_api.py b/sickgear/indexers/indexer_api.py index 530faa96..c5ee5f65 100644 --- a/sickgear/indexers/indexer_api.py +++ b/sickgear/indexers/indexer_api.py @@ -20,8 +20,6 @@ from sg_helpers import proxy_setting import sickgear from lib.tvinfo_base import TVInfoBase -from _23 import list_values - # noinspection PyUnreachableCode if False: from typing import AnyStr, Dict @@ -83,13 +81,13 @@ class TVInfoAPI(object): @property def sources(self): # type: () -> Dict[int, AnyStr] - return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if not x['mapped_only'] and + return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if not x['mapped_only'] and True is not x.get('fallback') and True is not x.get('people_only')]) @property def search_sources(self): # type: () -> Dict[int, AnyStr] - return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if not x['mapped_only'] and + return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if not x['mapped_only'] and x.get('active') and not x.get('defunct') and True is not x.get('fallback') and True is not x.get('people_only')]) @@ -99,7 +97,7 @@ class TVInfoAPI(object): """ :return: return all indexers including mapped only indexers excluding fallback indexers """ - return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if True is not x.get('fallback') + return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if True is not x.get('fallback') and True is not x.get('people_only')]) @property @@ -108,9 +106,9 @@ class TVInfoAPI(object): """ :return: return all fallback indexers """ - return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if True is x.get('fallback')]) + return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if True is x.get('fallback')]) @property def xem_supported_sources(self): # type: () -> Dict[int, AnyStr] - return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if x.get('xem_origin')]) + return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if x.get('xem_origin')]) diff --git a/sickgear/metadata/__init__.py b/sickgear/metadata/__init__.py index 8e1a4315..95fbcf48 100644 --- a/sickgear/metadata/__init__.py +++ b/sickgear/metadata/__init__.py @@ -19,11 +19,10 @@ __all__ = ['generic', 'helpers', 'kodi', 'mede8er', 'mediabrowser', 'ps3', 'tivo import sys from . 
import kodi, mede8er, mediabrowser, ps3, tivo, wdtv, xbmc, xbmc_12plus -from _23 import filter_list def available_generators(): - return filter_list(lambda x: x not in ('generic', 'helpers'), __all__) + return list(filter(lambda x: x not in ('generic', 'helpers'), __all__)) def _getMetadataModule(name): diff --git a/sickgear/metadata/generic.py b/sickgear/metadata/generic.py index 810a01f0..d80022b3 100644 --- a/sickgear/metadata/generic.py +++ b/sickgear/metadata/generic.py @@ -35,7 +35,6 @@ from lib.fanart.core import Request as fanartRequest import lib.fanart as fanart from lxml_etree import etree -from _23 import filter_iter, list_keys from six import iteritems, itervalues, string_types # noinspection PyUnreachableCode @@ -874,7 +873,7 @@ class GenericMetadata(object): tv_id).name + ", not downloading images: " + ex(e), logger.WARNING) # todo: when tmdb is added as tv source remove the hardcoded TVINFO_TMDB - for tv_src in list(OrderedDict.fromkeys([show_obj.tvid] + list_keys(sickgear.TVInfoAPI().search_sources) + + for tv_src in list(OrderedDict.fromkeys([show_obj.tvid] + list(sickgear.TVInfoAPI().search_sources) + [TVINFO_TMDB])): if tv_src != show_obj.tvid and not show_obj.ids.get(tv_src, {}).get('id'): continue @@ -1220,9 +1219,9 @@ class GenericMetadata(object): resp = request.response() itemlist = [] dedupe = [] - for art in filter_iter(lambda i: 10 < len(i.get('url', '')) and (lang == i.get('lang', '')[0:2]), - # remove "[0:2]" ... to strictly use only data where "en" is at source - resp[types[image_type]]): # type: dict + for art in filter(lambda i: 10 < len(i.get('url', '')) and (lang == i.get('lang', '')[0:2]), + # remove "[0:2]" ... to strictly use only data where "en" is at source + resp[types[image_type]]): # type: dict try: url = (art['url'], art['url'].replace('/fanart/', '/preview/'))[thumb] if url not in dedupe: diff --git a/sickgear/metadata/kodi.py b/sickgear/metadata/kodi.py index e679ebeb..8472f30c 100644 --- a/sickgear/metadata/kodi.py +++ b/sickgear/metadata/kodi.py @@ -29,7 +29,7 @@ import exceptions_helper from exceptions_helper import ex from lxml_etree import etree -from _23 import decode_str, map_iter +from _23 import decode_str from six import string_types # noinspection PyUnreachableCode @@ -157,7 +157,7 @@ class KODIMetadata(generic.GenericMetadata): has_id = False tvdb_id = None - for tvid, slug in map_iter( + for tvid, slug in map( lambda _tvid: (_tvid, sickgear.TVInfoAPI(_tvid).config.get('kodi_slug')), list(sickgear.TVInfoAPI().all_sources)): mid = slug and show_obj.ids[tvid].get('id') diff --git a/sickgear/name_parser/parser.py b/sickgear/name_parser/parser.py index 8d63bb59..a1d37109 100644 --- a/sickgear/name_parser/parser.py +++ b/sickgear/name_parser/parser.py @@ -39,8 +39,8 @@ from lib.tvinfo_base.exceptions import * from ..classes import OrderedDefaultdict from .._legacy_classes import LegacyParseResult -from _23 import decode_str, list_keys, list_range -from six import iteritems, iterkeys, itervalues, PY2, string_types, text_type +from _23 import decode_str, list_range +from six import iteritems, iterkeys, itervalues, string_types, text_type # noinspection PyUnreachableCode if False: @@ -166,7 +166,7 @@ class NameParser(object): result.which_regex = [cur_regex_name] result.score = 0 - cur_regex_num - named_groups = list_keys(match.groupdict()) + named_groups = list(match.groupdict()) if 'series_name' in named_groups: result.series_name = match.group('series_name') @@ -511,10 +511,7 @@ class NameParser(object): @staticmethod def _unicodify(obj, 
encoding='utf-8'): - if PY2 and isinstance(obj, string_types): - if not isinstance(obj, text_type): - obj = text_type(obj, encoding, 'replace') - if not PY2 and isinstance(obj, text_type): + if isinstance(obj, text_type): try: return obj.encode('latin1').decode('utf8') except (BaseException, Exception): @@ -751,9 +748,7 @@ class ParseResult(LegacyParseResult): self.release_group, self.air_date, tuple(self.ab_episode_numbers))) def __str__(self): - if not PY2: - return self.__unicode__() - return self.__unicode__().encode('utf-8', errors='ignore') + return self.__unicode__() def __unicode__(self): if None is not self.series_name: diff --git a/sickgear/network_timezones.py b/sickgear/network_timezones.py index 04c70aef..961b9511 100644 --- a/sickgear/network_timezones.py +++ b/sickgear/network_timezones.py @@ -29,8 +29,7 @@ from lib.dateutil import tz, zoneinfo from lib.tzlocal import get_localzone from sg_helpers import remove_file_perm, scantree -from six import integer_types, iteritems, string_types, PY2 -from _23 import list_keys +from six import integer_types, iteritems, string_types # noinspection PyUnreachableCode if False: @@ -547,7 +546,7 @@ def _load_network_conversions(): # remove deleted records if 0 < len(conversions_db): - network_name = list_keys(conversions_db) + network_name = list(conversions_db) cl.append(['DELETE FROM network_conversions WHERE tvdb_network' ' IN (%s)' % ','.join(['?'] * len(network_name)), network_name]) @@ -632,8 +631,6 @@ def get_episode_time(d, # type: int if d and None is not ep_time and None is not tzinfo: ep_date = datetime.date.fromordinal(helpers.try_int(d)) - if PY2: - return datetime.datetime.combine(ep_date, ep_time).replace(tzinfo=tzinfo) return datetime.datetime.combine(ep_date, ep_time, tzinfo) return parse_date_time(d, t, tzinfo) diff --git a/sickgear/notifiers/__init__.py b/sickgear/notifiers/__init__.py index 1b56c4f5..b35ae421 100644 --- a/sickgear/notifiers/__init__.py +++ b/sickgear/notifiers/__init__.py @@ -25,8 +25,6 @@ from . 
import emby, kodi, plex, xbmc, \ import sickgear -from _23 import filter_iter, list_values - class NotifierFactory(object): @@ -68,32 +66,27 @@ class NotifierFactory(object): :return: ID String :rtype: String """ - for n in filter_iter(lambda v: v.is_enabled(), - list_values(self.notifiers)): + for n in filter(lambda v: v.is_enabled(), list(self.notifiers.values())): yield n.id() @property def enabled_onsnatch(self): - for n in filter_iter(lambda v: v.is_enabled() and v.is_enabled_onsnatch(), - list_values(self.notifiers)): + for n in filter(lambda v: v.is_enabled() and v.is_enabled_onsnatch(), list(self.notifiers.values())): yield n.id() @property def enabled_ondownload(self): - for n in filter_iter(lambda v: v.is_enabled() and v.is_enabled_ondownload(), - list_values(self.notifiers)): + for n in filter(lambda v: v.is_enabled() and v.is_enabled_ondownload(), list(self.notifiers.values())): yield n.id() @property def enabled_onsubtitledownload(self): - for n in filter_iter(lambda v: v.is_enabled() and v.is_enabled_onsubtitledownload(), - list_values(self.notifiers)): + for n in filter(lambda v: v.is_enabled() and v.is_enabled_onsubtitledownload(), list(self.notifiers.values())): yield n.id() @property def enabled_library(self): - for n in filter_iter(lambda v: v.is_enabled() and v.is_enabled_library(), - list_values(self.notifiers)): + for n in filter(lambda v: v.is_enabled() and v.is_enabled_library(), list(self.notifiers.values())): yield n.id() def get(self, nid): diff --git a/sickgear/notifiers/emby.py b/sickgear/notifiers/emby.py index 249c6639..81065c00 100644 --- a/sickgear/notifiers/emby.py +++ b/sickgear/notifiers/emby.py @@ -21,7 +21,7 @@ from .generic import Notifier from json_helper import json_loads import sickgear -from _23 import decode_bytes, decode_str, map_list +from _23 import decode_bytes, decode_str class EmbyNotifier(Notifier): @@ -50,7 +50,7 @@ class EmbyNotifier(Notifier): timeout=20, hooks=dict(response=self._cb_response), json=True) return self.response and self.response.get('ok') and 200 == self.response.get('status_code') and \ - version <= map_list(lambda x: int(x), (response and response.get('Version') or '0.0.0.0').split('.')) + version <= list(map(lambda x: int(x), (response and response.get('Version') or '0.0.0.0').split('.'))) def update_library(self, show_obj=None, **kwargs): """ Update library function diff --git a/sickgear/notifiers/plex.py b/sickgear/notifiers/plex.py index 5eaf646c..b84c7d89 100644 --- a/sickgear/notifiers/plex.py +++ b/sickgear/notifiers/plex.py @@ -20,8 +20,8 @@ from .generic import Notifier import sickgear from exceptions_helper import ex -from _23 import b64encodestring, decode_str, etree, filter_iter, list_values, unquote_plus, urlencode -from six import iteritems, text_type, PY2 +from _23 import b64encodestring, decode_str, etree, unquote_plus, urlencode +from six import iteritems # noinspection PyUnresolvedReferences from six.moves import urllib @@ -49,8 +49,7 @@ class PLEXNotifier(Notifier): return False for key in command: - if not PY2 or type(command[key]) == text_type: - command[key] = command[key].encode('utf-8') + command[key] = command[key].encode('utf-8') enc_command = urlencode(command) self._log_debug(u'Encoded API command: ' + enc_command) @@ -203,7 +202,7 @@ class PLEXNotifier(Notifier): hosts_failed.append(cur_host) continue - for section in filter_iter(lambda x: 'show' == x.attrib['type'], sections): + for section in filter(lambda x: 'show' == x.attrib['type'], sections): if str(section.attrib['key']) in 
hosts_all: continue keyed_host = [(str(section.attrib['key']), cur_host)] @@ -247,18 +246,14 @@ class PLEXNotifier(Notifier): return '' hosts = [ - host.replace('http://', '') for host in filter_iter(lambda x: x.startswith('http:'), - list_values(hosts_all))] + host.replace('http://', '') for host in filter(lambda x: x.startswith('http:'), list(hosts_all.values()))] secured = [ - host.replace('https://', '') for host in filter_iter(lambda x: x.startswith('https:'), - list_values(hosts_all))] + host.replace('https://', '') for host in filter(lambda x: x.startswith('https:'), list(hosts_all.values()))] failed = ', '.join([ - host.replace('http://', '') for host in filter_iter(lambda x: x.startswith('http:'), - hosts_failed)]) - failed_secured = ', '.join(filter_iter( + host.replace('http://', '') for host in filter(lambda x: x.startswith('http:'), hosts_failed)]) + failed_secured = ', '.join(filter( lambda x: x not in hosts, - [host.replace('https://', '') for host in filter_iter(lambda x: x.startswith('https:'), - hosts_failed)])) + [host.replace('https://', '') for host in filter(lambda x: x.startswith('https:'), hosts_failed)])) return '
<br>' + '<br>
'.join([result for result in [ ('', 'Fail: username/password when fetching credentials from plex.tv')[False is token_arg], diff --git a/sickgear/notifiers/trakt.py b/sickgear/notifiers/trakt.py index dcd2a28a..cb24c4ff 100644 --- a/sickgear/notifiers/trakt.py +++ b/sickgear/notifiers/trakt.py @@ -22,7 +22,6 @@ import sickgear from lib.api_trakt import TraktAPI, exceptions from exceptions_helper import ConnectionSkipException -from _23 import list_keys from six import iteritems # noinspection PyUnreachableCode @@ -38,7 +37,7 @@ class TraktNotifier(BaseNotifier): def is_enabled_library(cls): if sickgear.TRAKT_ACCOUNTS: for tid, locations in iteritems(sickgear.TRAKT_UPDATE_COLLECTION): - if tid in list_keys(sickgear.TRAKT_ACCOUNTS): + if tid in list(sickgear.TRAKT_ACCOUNTS): return True return False @@ -89,7 +88,7 @@ class TraktNotifier(BaseNotifier): data['shows'][0]['seasons'][0]['episodes'].append({'number': cur_ep_obj.episode}) for tid, locations in iteritems(sickgear.TRAKT_UPDATE_COLLECTION): - if tid not in list_keys(sickgear.TRAKT_ACCOUNTS): + if tid not in list(sickgear.TRAKT_ACCOUNTS): continue for loc in locations: if not ep_obj.location.startswith('%s%s' % (loc.rstrip(os.path.sep), os.path.sep)): diff --git a/sickgear/notifiers/xbmc.py b/sickgear/notifiers/xbmc.py index 71b24718..67b0412e 100644 --- a/sickgear/notifiers/xbmc.py +++ b/sickgear/notifiers/xbmc.py @@ -23,7 +23,6 @@ from exceptions_helper import ex from json_helper import json_dumps, json_load from _23 import b64encodestring, decode_str, etree, quote, unquote, unquote_plus, urlencode -from six import PY2, text_type # noinspection PyUnresolvedReferences from six.moves import urllib @@ -150,8 +149,7 @@ class XBMCNotifier(Notifier): password = self._choose(password, sickgear.XBMC_PASSWORD) for key in command: - if not PY2 or type(command[key]) == text_type: - command[key] = command[key].encode('utf-8') + command[key] = command[key].encode('utf-8') enc_command = urlencode(command) self._log_debug(u'Encoded API command: ' + enc_command) diff --git a/sickgear/piper.py b/sickgear/piper.py index 65217b8d..d4c8a3a9 100644 --- a/sickgear/piper.py +++ b/sickgear/piper.py @@ -10,8 +10,7 @@ import re from json_helper import json_loads from sg_helpers import cmdline_runner, is_virtualenv -from _23 import filter_list, ordered_dict -from six import iteritems, PY2 +from six import iteritems # noinspection PyUnreachableCode if False: @@ -51,10 +50,6 @@ def run_pip(pip_cmd, suppress_stderr=False): pip_cmd += ['--progress-bar', 'off'] new_pip_arg = ['--no-python-version-warning'] - if PY2: - pip_version, _, _ = _get_pip_version() - if pip_version and 20 > int(pip_version.split('.')[0]): - new_pip_arg = [] return cmdline_runner( [sys.executable, '-m', 'pip'] + new_pip_arg + ['--disable-pip-version-check'] + pip_cmd, @@ -72,7 +67,7 @@ def initial_requirements(): from Cheetah import VersionTuple is_cheetah2 = (3, 0, 0) > VersionTuple[0:3] - is_cheetah3py3 = not PY2 and (3, 3, 0) > VersionTuple[0:3] + is_cheetah3py3 = (3, 3, 0) > VersionTuple[0:3] if not (is_cheetah2 or is_cheetah3py3): return @@ -158,13 +153,10 @@ def check_pip_env(): _, _, installed, failed_names = _check_pip_env() - py2_last = 'final py2 release' boost = 'performance boost' extra_info = dict({'Cheetah3': 'filled requirement', 'CT3': 'filled requirement', 'lxml': boost, 'python-Levenshtein': boost}) - extra_info.update((dict(cryptography=py2_last, pip=py2_last, regex=py2_last, - scandir=boost, setuptools=py2_last), - dict(regex=boost))[not PY2]) + 
extra_info.update(dict(regex=boost)) return installed, extra_info, failed_names @@ -256,9 +248,9 @@ def _check_pip_env(pip_outdated=False, reset_fails=False): names_outdated = dict({cur_item.get('name'): {k: cur_item.get(k) for k in ('version', 'latest_version', 'latest_filetype')} for cur_item in json_loads(output)}) - to_update = set(filter_list( + to_update = set(list(filter( lambda name: name in specifiers and names_outdated[name]['latest_version'] in specifiers[name], - set(names_reco).intersection(set(names_outdated)))) + set(names_reco).intersection(set(names_outdated))))) # check whether to ignore direct reference specification updates if not dev mode if not int(os.environ.get('CHK_URL_SPECIFIERS', 0)): @@ -272,7 +264,7 @@ def _check_pip_env(pip_outdated=False, reset_fails=False): except (BaseException, Exception): pass - updates_todo = ordered_dict() + updates_todo = dict() todo = to_install.union(to_update, requirement_update) for cur_name in [cur_n for cur_n in names_reco if cur_n in todo]: updates_todo[cur_name] = dict({ diff --git a/sickgear/postProcessor.py b/sickgear/postProcessor.py index 945f257b..ac08f1df 100644 --- a/sickgear/postProcessor.py +++ b/sickgear/postProcessor.py @@ -33,7 +33,7 @@ from .indexers.indexer_config import TVINFO_TVDB from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser from _23 import decode_str -from six import iteritems, PY2, string_types +from six import iteritems, string_types from sg_helpers import long_path, cmdline_runner # noinspection PyUnreachableCode @@ -824,12 +824,7 @@ class PostProcessor(object): script_cmd[0] = os.path.abspath(script_cmd[0]) self._log(u'Absolute path to script: ' + script_cmd[0], logger.DEBUG) - if PY2: - script_cmd += [ep_obj.location.encode(sickgear.SYS_ENCODING), - self.file_path.encode(sickgear.SYS_ENCODING) - ] - else: - script_cmd += [ep_obj.location, self.file_path] + script_cmd += [ep_obj.location, self.file_path] script_cmd += ([], [str(ep_obj.show_obj.tvid)])[new_call] + [ str(ep_obj.show_obj.prodid), @@ -1174,9 +1169,8 @@ class PostProcessor(object): keepalive = keepalive_stop = None if self.webhandler: def keep_alive(webh, stop_event): - if not PY2: - import asyncio - asyncio.set_event_loop(asyncio.new_event_loop()) + import asyncio + asyncio.set_event_loop(asyncio.new_event_loop()) while not stop_event.is_set(): stop_event.wait(60) webh('.') diff --git a/sickgear/processTV.py b/sickgear/processTV.py index 18a7a0dc..78fff9fd 100644 --- a/sickgear/processTV.py +++ b/sickgear/processTV.py @@ -35,8 +35,7 @@ from .history import reset_status from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser from .sgdatetime import timestamp_near -from _23 import filter_list, filter_iter, list_values, map_iter -from six import iteritems, iterkeys, string_types, PY2, text_type +from six import iteritems, iterkeys, string_types, text_type from sg_helpers import long_path, scantree import lib.rarfile.rarfile as rarfile @@ -281,7 +280,7 @@ class ProcessTVShow(object): build_path = (lambda old_path: '%s%s' % (helpers.real_path(old_path).rstrip(os.path.sep), os.path.sep)) process_path = build_path(path) - for parent in map_iter(lambda p: build_path(p), sickgear.ROOT_DIRS.split('|')[1:]): + for parent in map(lambda p: build_path(p), sickgear.ROOT_DIRS.split('|')[1:]): if process_path.startswith(parent): return parent.rstrip(os.path.sep) @@ -352,7 +351,7 @@ class ProcessTVShow(object): path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type) - if 
sickgear.POSTPONE_IF_SYNC_FILES and any(filter_iter(helpers.is_sync_file, files)): + if sickgear.POSTPONE_IF_SYNC_FILES and any(filter(helpers.is_sync_file, files)): self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) return self.result @@ -367,7 +366,7 @@ class ProcessTVShow(object): work_files += [joined] rar_files, rarfile_history = self.unused_archives( - path, filter_list(helpers.is_first_rar_volume, files), pp_type, process_method) + path, list(filter(helpers.is_first_rar_volume, files)), pp_type, process_method) rar_content = self._unrar(path, rar_files, force) if self.fail_detected: self._process_failed(dir_name, nzb_name, show_obj=show_obj) @@ -376,8 +375,8 @@ class ProcessTVShow(object): rar_content = [x for x in rar_content if not helpers.is_link(os.path.join(path, x))] path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type) files = [x for x in files if not helpers.is_link(os.path.join(path, x))] - video_files = filter_list(helpers.has_media_ext, files) - video_in_rar = filter_list(helpers.has_media_ext, rar_content) + video_files = list(filter(helpers.has_media_ext, files)) + video_in_rar = list(filter(helpers.has_media_ext, rar_content)) work_files += [os.path.join(path, item) for item in rar_content] if 0 < len(files): @@ -438,7 +437,7 @@ class ProcessTVShow(object): for walk_path, walk_dir, files in os.walk(os.path.join(path, directory), topdown=False): - if sickgear.POSTPONE_IF_SYNC_FILES and any(filter_iter(helpers.is_sync_file, files)): + if sickgear.POSTPONE_IF_SYNC_FILES and any(filter(helpers.is_sync_file, files)): self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) return self.result @@ -452,7 +451,7 @@ class ProcessTVShow(object): files = [x for x in files if not helpers.is_link(os.path.join(walk_path, x))] rar_files, rarfile_history = self.unused_archives( - walk_path, filter_list(helpers.is_first_rar_volume, files), pp_type, process_method, + walk_path, list(filter(helpers.is_first_rar_volume, files)), pp_type, process_method, rarfile_history) rar_content = self._unrar(walk_path, rar_files, force) work_files += [os.path.join(walk_path, item) for item in rar_content] @@ -461,8 +460,8 @@ class ProcessTVShow(object): continue rar_content = [x for x in rar_content if not helpers.is_link(os.path.join(walk_path, x))] files = list(set(files + rar_content)) - video_files = filter_list(helpers.has_media_ext, files) - video_in_rar = filter_list(helpers.has_media_ext, rar_content) + video_files = list(filter(helpers.has_media_ext, files)) + video_in_rar = list(filter(helpers.has_media_ext, rar_content)) notwanted_files = [x for x in files if x not in video_files] # Don't Link media when the media is extracted from a rar in the same path @@ -640,7 +639,7 @@ class ProcessTVShow(object): all_dirs += process_dir all_files += fileList - video_files = filter_list(helpers.has_media_ext, all_files) + video_files = list(filter(helpers.has_media_ext, all_files)) all_dirs.append(dir_name) # check if the directory have at least one tv video file @@ -660,7 +659,7 @@ class ProcessTVShow(object): if sickgear.UNPACK and process_path and all_files: # Search for packed release - packed_files = filter_list(helpers.is_first_rar_volume, all_files) + packed_files = list(filter(helpers.is_first_rar_volume, all_files)) for packed in packed_files: try: @@ -719,9 +718,8 @@ class ProcessTVShow(object): rar_content = [os.path.normpath(x.filename) for x in rar_handle.infolist() if not x.is_dir()] renamed = 
self.cleanup_names(path, rar_content) cur_unpacked = rar_content if not renamed else \ - (list(set(rar_content) - set(iterkeys(renamed))) + list_values(renamed)) - self._log_helper(u'Unpacked content: [u\'%s\']' % '\', u\''.join(map_iter(text_type, - cur_unpacked))) + (list(set(rar_content) - set(iterkeys(renamed))) + list(renamed.values())) + self._log_helper(u'Unpacked content: [u\'%s\']' % '\', u\''.join(map(text_type, cur_unpacked))) unpacked_files += cur_unpacked except (rarfile.PasswordRequired, rarfile.RarWrongPassword): self._log_helper(u'Failed to unpack archive PasswordRequired: %s' % archive, logger.ERROR) @@ -928,10 +926,6 @@ class ProcessTVShow(object): if force or not self.any_vid_processed: return False - # Needed for accessing DB with a unicode dir_name - if PY2 and not isinstance(dir_name, text_type): - dir_name = text_type(dir_name, 'utf_8') - parse_result = None try: parse_result = NameParser(convert=True).parse(videofile, cache_result=False) @@ -974,8 +968,6 @@ class ProcessTVShow(object): else: # This is needed for video whose name differ from dir_name - if PY2 and not isinstance(videofile, text_type): - videofile = text_type(videofile, 'utf_8') sql_result = my_db.select( 'SELECT * FROM tv_episodes WHERE release_name = ?', [videofile.rpartition('.')[0]]) diff --git a/sickgear/properFinder.py b/sickgear/properFinder.py index 9e26b98c..5c3f899a 100644 --- a/sickgear/properFinder.py +++ b/sickgear/properFinder.py @@ -32,7 +32,7 @@ from .history import dateFormat from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser from .sgdatetime import timestamp_near -from _23 import filter_iter, filter_list, list_values, map_consume, map_list +from _23 import map_consume from six import string_types # noinspection PyUnreachableCode @@ -251,9 +251,9 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime # filter provider list for: # 1. from recent search: recent search enabled providers # 2. 
native proper search: active search enabled providers - provider_list = filter_list( + provider_list = list(filter( lambda p: p.is_active() and (p.enable_recentsearch, p.enable_backlog)[None is proper_dict], - sickgear.providers.sortedProviderList()) + sickgear.providers.sortedProviderList())) search_threads = [] if None is proper_dict: @@ -487,7 +487,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime cur_provider.log_result('Propers', len(propers), '%s' % cur_provider.name) - return list_values(propers) + return list(propers.values()) def _download_propers(proper_list): @@ -507,24 +507,24 @@ def _download_propers(proper_list): # get verified list; sort the list of unique Propers for highest proper_level, newest first for cur_proper in sorted( - filter_iter(lambda p: p not in consumed_proper, - # allows Proper to fail or be rejected and another to be tried (with a different name) - filter_iter(lambda p: _epid(p) not in downloaded_epid, proper_list)), + filter(lambda p: p not in consumed_proper, + # allows Proper to fail or be rejected and another to be tried (with a different name) + filter(lambda p: _epid(p) not in downloaded_epid, proper_list)), key=operator.attrgetter('properlevel', 'date'), reverse=True): # type: Proper epid = _epid(cur_proper) # if the show is in our list and there hasn't been a Proper already added for that particular episode # then add it to our list of Propers - if epid not in map_list(_epid, verified_propers): + if epid not in list(map(_epid, verified_propers)): logger.log('Proper may be useful [%s]' % cur_proper.name) verified_propers.add(cur_proper) else: # use Proper with the highest level remove_propers = set() map_consume(lambda vp: remove_propers.add(vp), - filter_iter(lambda p: (epid == _epid(p) and cur_proper.proper_level > p.proper_level), - verified_propers)) + filter(lambda p: (epid == _epid(p) and cur_proper.proper_level > p.proper_level), + verified_propers)) if remove_propers: verified_propers -= remove_propers diff --git a/sickgear/providers/__init__.py b/sickgear/providers/__init__.py index 4ba6218d..5ba75287 100644 --- a/sickgear/providers/__init__.py +++ b/sickgear/providers/__init__.py @@ -22,7 +22,6 @@ from .newznab import NewznabConstants from .. import logger import sickgear -from _23 import filter_list, filter_iter from six import iteritems, itervalues # noinspection PyUnreachableCode @@ -50,7 +49,7 @@ for module in __all__: try: m = importlib.import_module('.' 
+ module, 'sickgear.providers') globals().update({n: getattr(m, n) for n in m.__all__} if hasattr(m, '__all__') - else dict(filter_iter(lambda t: '_' != t[0][0], iteritems(m.__dict__)))) + else dict(filter(lambda t: '_' != t[0][0], iteritems(m.__dict__)))) except ImportError as e: if 'custom' != module[0:6]: raise e @@ -74,12 +73,12 @@ def sortedProviderList(): newList.append(providerDict[curModule]) if not sickgear.PROVIDER_ORDER: - nzb = filter_list(lambda p: p.providerType == generic.GenericProvider.NZB, itervalues(providerDict)) - tor = filter_list(lambda p: p.providerType != generic.GenericProvider.NZB, itervalues(providerDict)) - newList = sorted(filter_iter(lambda p: not p.anime_only, nzb), key=lambda v: v.get_id()) + \ - sorted(filter_iter(lambda p: not p.anime_only, tor), key=lambda v: v.get_id()) + \ - sorted(filter_iter(lambda p: p.anime_only, nzb), key=lambda v: v.get_id()) + \ - sorted(filter_iter(lambda p: p.anime_only, tor), key=lambda v: v.get_id()) + nzb = list(filter(lambda p: p.providerType == generic.GenericProvider.NZB, itervalues(providerDict))) + tor = list(filter(lambda p: p.providerType != generic.GenericProvider.NZB, itervalues(providerDict))) + newList = sorted(filter(lambda p: not p.anime_only, nzb), key=lambda v: v.get_id()) + \ + sorted(filter(lambda p: not p.anime_only, tor), key=lambda v: v.get_id()) + \ + sorted(filter(lambda p: p.anime_only, nzb), key=lambda v: v.get_id()) + \ + sorted(filter(lambda p: p.anime_only, tor), key=lambda v: v.get_id()) # add any modules that are missing from that list for curModule in providerDict: @@ -119,7 +118,7 @@ def make_unique_list(p_list, d_list=None): default_names = [d.name for d in d_list or []] - p_list = filter_iter(lambda _x: _x.get_id() not in ['sick_beard_index'], p_list) + p_list = filter(lambda _x: _x.get_id() not in ['sick_beard_index'], p_list) for cur_p in p_list: g_name = generic_provider_name(cur_p.name) g_url = generic_provider_url(cur_p.url) @@ -139,7 +138,7 @@ def make_unique_list(p_list, d_list=None): def getNewznabProviderList(data): # type: (AnyStr) -> List defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')] - providerList = make_unique_list(filter_list(lambda _x: _x, [makeNewznabProvider(x) for x in data.split('!!!')]), + providerList = make_unique_list(list(filter(lambda _x: _x, [makeNewznabProvider(x) for x in data.split('!!!')])), defaultList) providerDict = dict(zip([x.name for x in providerList], providerList)) @@ -158,7 +157,7 @@ def getNewznabProviderList(data): 'server_type'): setattr(providerDict[curDefault.name], k, getattr(curDefault, k)) - return filter_list(lambda _x: _x, providerList) + return list(filter(lambda _x: _x, providerList)) def makeNewznabProvider(config_string): @@ -189,9 +188,9 @@ def makeNewznabProvider(config_string): def getTorrentRssProviderList(data): - providerList = filter_list(lambda _x: _x, [makeTorrentRssProvider(x) for x in data.split('!!!')]) + providerList = list(filter(lambda _x: _x, [makeTorrentRssProvider(x) for x in data.split('!!!')])) - return filter_list(lambda _x: _x, providerList) + return list(filter(lambda _x: _x, providerList)) def makeTorrentRssProvider(config_string): diff --git a/sickgear/providers/alpharatio.py b/sickgear/providers/alpharatio.py index 4b4ed911..eb4e9a2e 100644 --- a/sickgear/providers/alpharatio.py +++ b/sickgear/providers/alpharatio.py @@ -25,7 +25,6 @@ from .. 
import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -63,7 +62,6 @@ class AlphaRatioProvider(generic.TorrentProvider): rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'view', 'get': 'download'})]) for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % (search_string, ('&freetorrent=1', '')[not self.freeleech]) html = self.get_url(search_url) diff --git a/sickgear/providers/bithdtv.py b/sickgear/providers/bithdtv.py index 4e7b4be9..86e37964 100644 --- a/sickgear/providers/bithdtv.py +++ b/sickgear/providers/bithdtv.py @@ -23,7 +23,6 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -67,7 +66,6 @@ class BitHDTVProvider(generic.TorrentProvider): for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % (search_string, self._categories_string(mode, '%s', ',')) html = self.get_url(search_url, timeout=90) diff --git a/sickgear/providers/blutopia.py b/sickgear/providers/blutopia.py index 0ef6bdb2..c8458a22 100644 --- a/sickgear/providers/blutopia.py +++ b/sickgear/providers/blutopia.py @@ -25,7 +25,6 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import filter_iter, unidecode from six import iteritems @@ -107,7 +106,6 @@ class BlutopiaProvider(generic.TorrentProvider): return results for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % ( self._token, search_string.replace('.', ' '), self._categories_string(template=''), '', '', '') @@ -136,7 +134,7 @@ class BlutopiaProvider(generic.TorrentProvider): marked = ','.join([x.attrs.get('data-original-title', '').lower() for x in tr.find_all( 'i', attrs={'class': ['text-gold', 'fa-diamond', 'fa-certificate']})]) # noinspection PyTypeChecker - munged = ''.join(filter_iter(marked.__contains__, ['free', 'double', 'feat'])) + munged = ''.join(filter(marked.__contains__, ['free', 'double', 'feat'])) # noinspection PyUnboundLocalVariable if ((non_marked and rc['filter'].search(munged)) or (not non_marked and not rc['filter'].search(munged))): diff --git a/sickgear/providers/btn.py b/sickgear/providers/btn.py index f5373228..7af84475 100644 --- a/sickgear/providers/btn.py +++ b/sickgear/providers/btn.py @@ -32,7 +32,6 @@ from bs4_parser import BS4Parser from exceptions_helper import AuthException from json_helper import json_dumps -from _23 import unidecode from six import iteritems @@ -201,7 +200,6 @@ class BTNProvider(generic.TorrentProvider): del (self.session.headers['Referer']) self.auth_html = True - search_string = unidecode(search_string) search_url = self.urls['search'] % (search_string, self._categories_string(mode, 'filter_cat[%s]=1')) html = self.get_url(search_url, use_tmr_limit=False) diff --git a/sickgear/providers/eztv.py b/sickgear/providers/eztv.py index 86bad378..5a723b1b 100644 --- a/sickgear/providers/eztv.py +++ b/sickgear/providers/eztv.py @@ -23,7 +23,7 @@ from .. 
import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import b64decodestring, unidecode +from _23 import b64decodestring from six import iteritems @@ -62,7 +62,6 @@ class EztvProvider(generic.TorrentProvider): for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['browse'] % search_string if 'Cache' == mode else \ self.urls['search'] % search_string.replace('.', ' ') diff --git a/sickgear/providers/fano.py b/sickgear/providers/fano.py index 67eb8395..ebb34fc8 100644 --- a/sickgear/providers/fano.py +++ b/sickgear/providers/fano.py @@ -25,7 +25,6 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems FLTAG = r'
\s+]+%s[^<]+Torrent let'})]) for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % search_string # fetches 15 results by default, and up to 100 if allowed in user profile diff --git a/sickgear/providers/nebulance.py b/sickgear/providers/nebulance.py index 99feacd0..f8005eca 100644 --- a/sickgear/providers/nebulance.py +++ b/sickgear/providers/nebulance.py @@ -25,7 +25,7 @@ from ..helpers import try_int from bs4_parser import BS4Parser from json_helper import json_dumps -from _23 import filter_list, unidecode, unquote_plus +from _23 import unquote_plus from six import iteritems @@ -83,7 +83,6 @@ class NebulanceProvider(generic.TorrentProvider): rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'nodots': r'[\.\s]+'})]) for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['browse'] % (self.user_authkey, self.user_passkey) if 'Cache' != mode: @@ -164,7 +163,7 @@ class NebulanceProvider(generic.TorrentProvider): ('(?i)%s(Proper)%s' % (bl, br), r'`\1`'), (r'%s\s*%s' % (bl, br), '`')]: title = re.sub(r[0], r[1], title) - grp = filter_list(lambda rn: '.release' in rn.lower(), item['tags']) + grp = list(filter(lambda rn: '.release' in rn.lower(), item['tags'])) title = '%s%s-%s' % (('', t[0])[1 < len(t)], title, (any(grp) and grp[0] or 'nogrp').upper().replace('.RELEASE', '')) @@ -186,7 +185,7 @@ class NebulanceProvider(generic.TorrentProvider): for mode in search_params: for search_string in search_params[mode]: - search_string = unquote_plus(unidecode(search_string)) + search_string = unquote_plus(search_string) params = {'release': search_string} if 'Cache' == mode: diff --git a/sickgear/providers/nyaa.py b/sickgear/providers/nyaa.py index 8b2bd5a9..65156509 100644 --- a/sickgear/providers/nyaa.py +++ b/sickgear/providers/nyaa.py @@ -22,7 +22,6 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -51,7 +50,6 @@ class NyaaProvider(generic.TorrentProvider): rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'view', 'get': '(?:torrent|magnet:)'})]) for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % ((0, 2)[self.confirmed], search_string.replace('.', ' ')) html = self.get_url(search_url) diff --git a/sickgear/providers/pretome.py b/sickgear/providers/pretome.py index 87acb764..23d067dd 100644 --- a/sickgear/providers/pretome.py +++ b/sickgear/providers/pretome.py @@ -23,7 +23,6 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -58,7 +57,6 @@ class PreToMeProvider(generic.TorrentProvider): rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'details', 'get': 'download'})]) for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % search_string html = self.get_url(search_url) diff --git a/sickgear/providers/privatehd.py b/sickgear/providers/privatehd.py index 5f8fbdf0..7ba28252 100644 --- a/sickgear/providers/privatehd.py +++ b/sickgear/providers/privatehd.py @@ -25,7 +25,6 @@ from .. 
import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import filter_iter, unidecode from six import iteritems @@ -93,7 +92,6 @@ class PrivateHDProvider(generic.TorrentProvider): return results for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % ( '+'.join(search_string.split()), self._categories_string(mode, '')) @@ -120,7 +118,7 @@ class PrivateHDProvider(generic.TorrentProvider): if any(self.filter): marked = ','.join([x.attrs.get('title', '').lower() for x in tr.find_all( 'i', attrs={'class': ['fa-star', 'fa-diamond', 'fa-star-half-o']})]) - munged = ''.join(filter_iter(marked.__contains__, ['free', 'half', 'double'])) + munged = ''.join(filter(marked.__contains__, ['free', 'half', 'double'])) # noinspection PyUnboundLocalVariable if ((non_marked and rc['filter'].search(munged)) or (not non_marked and not rc['filter'].search(munged))): diff --git a/sickgear/providers/ptf.py b/sickgear/providers/ptf.py index 3870b82f..da1c94f2 100644 --- a/sickgear/providers/ptf.py +++ b/sickgear/providers/ptf.py @@ -26,7 +26,6 @@ from .. import logger from ..helpers import anon_url, try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -82,7 +81,6 @@ class PTFProvider(generic.TorrentProvider): for mode in search_params: rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|')) for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % ('+'.join(search_string.split()), self._categories_string(mode)) html = self.get_url(search_url) diff --git a/sickgear/providers/revtt.py b/sickgear/providers/revtt.py index 0ee68d6e..50527f39 100644 --- a/sickgear/providers/revtt.py +++ b/sickgear/providers/revtt.py @@ -23,7 +23,6 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -61,7 +60,6 @@ class RevTTProvider(generic.TorrentProvider): for mode in search_params: rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|')) for search_string in search_params[mode]: - search_string = unidecode(search_string) html = self.get_url(self.urls['search'] % ('+'.join(search_string.split()), self._categories_string(mode))) diff --git a/sickgear/providers/scenehd.py b/sickgear/providers/scenehd.py index 4b982fe0..74da4457 100644 --- a/sickgear/providers/scenehd.py +++ b/sickgear/providers/scenehd.py @@ -23,7 +23,6 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -62,7 +61,6 @@ class SceneHDProvider(generic.TorrentProvider): 'nuked': 'nuke', 'filter': 'free'})]) for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % (search_string, self._categories_string(mode, '%s', ',')) html = self.get_url(search_url, timeout=90) diff --git a/sickgear/providers/scenetime.py b/sickgear/providers/scenetime.py index 96d14262..f4f783fb 100644 --- a/sickgear/providers/scenetime.py +++ b/sickgear/providers/scenetime.py @@ -23,7 +23,6 @@ from .. 
import logger from ..helpers import anon_url, try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -70,7 +69,6 @@ class SceneTimeProvider(generic.TorrentProvider): urls = [] for search_string in search_params[mode]: urls += [[]] - search_string = unidecode(search_string) search_url = self.urls['search'] % (self._categories_string(), '+'.join(search_string.replace('.', ' ').split()), ('', '&freeleech=on')[self.freeleech]) diff --git a/sickgear/providers/shazbat.py b/sickgear/providers/shazbat.py index b0187e49..3121924d 100644 --- a/sickgear/providers/shazbat.py +++ b/sickgear/providers/shazbat.py @@ -26,7 +26,7 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode, unquote_plus +from _23 import unquote_plus from six import iteritems, text_type @@ -75,7 +75,6 @@ class ShazbatProvider(generic.TorrentProvider): if self.should_skip(): return results else: - search_string = unidecode(search_string) search_string = search_string.replace(show_detail, '').strip() search_url = self.urls['search'] % search_string html = self.get_url(search_url) diff --git a/sickgear/providers/showrss.py b/sickgear/providers/showrss.py index b630b2fb..e9356e14 100644 --- a/sickgear/providers/showrss.py +++ b/sickgear/providers/showrss.py @@ -25,7 +25,7 @@ from .. import logger from ..helpers import sanitize_scene_name from bs4_parser import BS4Parser -from _23 import decode_str, filter_list, html_unescape, list_keys, list_values, unidecode +from _23 import decode_str, html_unescape from six import iteritems, iterkeys @@ -51,11 +51,11 @@ class ShowRSSProvider(generic.TorrentProvider): def logged_in(self, y): if all([None is y or 'logout' in y, - bool(filter_list(lambda c: 'remember_web_' in c, iterkeys(self.session.cookies)))]): + bool(list(filter(lambda c: 'remember_web_' in c, iterkeys(self.session.cookies))))]): if None is not y: self.shows = dict(re.findall(r'', y)) for k, v in iteritems(self.shows): - self.shows[k] = sanitize_scene_name(html_unescape(unidecode(decode_str(v)))) + self.shows[k] = sanitize_scene_name(html_unescape(decode_str(v))) return True return False @@ -74,13 +74,12 @@ class ShowRSSProvider(generic.TorrentProvider): if 'Cache' == mode: search_url = self.urls['browse'] else: - search_string = unidecode(search_string) - show_name = filter_list(lambda x: x.lower() == re.sub(r'\s.*', '', search_string.lower()), - list_values(self.shows)) + show_name = list(filter(lambda x: x.lower() == re.sub(r'\s.*', '', search_string.lower()), + list(self.shows.values()))) if not show_name: continue - search_url = self.urls['search'] % list_keys(self.shows)[ - list_values(self.shows).index(show_name[0])] + search_url = self.urls['search'] % list(self.shows)[ + list(self.shows.values()).index(show_name[0])] if search_url in urls: continue diff --git a/sickgear/providers/snowfl.py b/sickgear/providers/snowfl.py index e78f1f78..eb7986cc 100644 --- a/sickgear/providers/snowfl.py +++ b/sickgear/providers/snowfl.py @@ -25,7 +25,7 @@ from .. 
import logger from ..helpers import try_int from json_helper import json_loads -from _23 import b64encodestring, filter_iter, map_list, quote, unidecode +from _23 import b64encodestring, quote from six import iteritems # noinspection PyUnreachableCode @@ -74,7 +74,7 @@ class SnowflProvider(generic.TorrentProvider): params = dict(token=token[0], ent=token[1]) if 'Cache' != mode: - params.update({'ss': quote_fx(unidecode(search_string))}) + params.update({'ss': quote_fx(search_string)}) data_json = None vals = [i for i in range(3, 8)] @@ -92,13 +92,13 @@ class SnowflProvider(generic.TorrentProvider): if self.should_skip(): return results - for item in filter_iter(lambda di: re.match('(?i).*?(tv|television)', - di.get('type', '') or di.get('category', '')) - and (not self.confirmed or di.get('trusted') or di.get('verified')), - data_json or {}): - seeders, leechers, size = map_list(lambda arg: try_int( + for item in filter(lambda di: re.match('(?i).*?(tv|television)', + di.get('type', '') or di.get('category', '')) + and (not self.confirmed or di.get('trusted') or di.get('verified')), + data_json or {}): + seeders, leechers, size = list(map(lambda arg: try_int( *([item.get(arg[0]) if None is not item.get(arg[0]) else item.get(arg[1])]) * 2), - (('seeder', 'seed'), ('leecher', 'leech'), ('size', 'size'))) + (('seeder', 'seed'), ('leecher', 'leech'), ('size', 'size')))) if self._reject_item(seeders, leechers): continue title = item.get('name') or item.get('title') @@ -163,7 +163,7 @@ class SnowflProvider(generic.TorrentProvider): else: from sickgear import providers if 'torlock' in url.lower(): - prov = next(filter_iter(lambda p: 'torlock' == p.name.lower(), (filter_iter( + prov = next(filter(lambda p: 'torlock' == p.name.lower(), (filter( lambda sp: sp.providerType == self.providerType, providers.sortedProviderList())))) state = prov.enabled prov.enabled = True diff --git a/sickgear/providers/speedapp.py b/sickgear/providers/speedapp.py index e730e193..478e20b3 100644 --- a/sickgear/providers/speedapp.py +++ b/sickgear/providers/speedapp.py @@ -21,7 +21,6 @@ from . 
import generic from ..helpers import try_int from six import string_types -from _23 import filter_list, map_list, unidecode class SpeedAppProvider(generic.TorrentProvider): @@ -55,14 +54,15 @@ class SpeedAppProvider(generic.TorrentProvider): self.perms_needed = self.perms if isinstance(resp, dict) and isinstance(resp.get('scopes'), list): self._authd = True - self.perms_needed = filter_list(lambda x: True is not x, [p in resp.get('scopes') or p for p in self.perms]) + self.perms_needed = list(filter(lambda x: True is not x, + [p in resp.get('scopes') or p for p in self.perms])) if not self.perms_needed: self.categories = None resp = self.get_url(self.urls['cats'], skip_auth=True, parse_json=True, headers=self.auth_header()) if isinstance(resp, list): - categories = [category['id'] for category in filter_list( + categories = [category['id'] for category in list(filter( lambda c: isinstance(c.get('id'), int) and isinstance(c.get('name'), string_types) - and c.get('name').upper() in ('TV PACKS', 'TV HD', 'TV SD'), resp)] + and c.get('name').upper() in ('TV PACKS', 'TV HD', 'TV SD'), resp))] self.categories = {'Cache': categories, 'Episode': categories, 'Season': categories} return not any(self.perms_needed) @@ -81,7 +81,7 @@ class SpeedAppProvider(generic.TorrentProvider): for mode in search_params: for search_string in search_params[mode]: search_url = self.urls['search'] % ( - unidecode(search_string), self._categories_string(mode, template='categories[]=%s')) + search_string, self._categories_string(mode, template='categories[]=%s')) data_json = self.get_url(search_url, skip_auth=True, parse_json=True, headers=self.auth_header()) if self.should_skip(): @@ -111,10 +111,10 @@ class SpeedAppProvider(generic.TorrentProvider): ('%s_api_key_tip' % self.get_id()) == key and \ ((not_authd or self.perms_needed) and ('create token at %s site
' - 'with perms %s' % (self.url_base, self.name, map_list( + 'with perms %s' % (self.url_base, self.name, list(map( lambda p: 't.read' in p and 'Read torrents' or 't.down' in p and 'Download torrents' - or 'ch.read' in p and 'Read snatches', self.perms_needed))) + or 'ch.read' in p and 'Read snatches', self.perms_needed)))) .replace('[', '').replace(']', '') or 'token is valid and required permissions are enabled') \ or '' diff --git a/sickgear/providers/speedcd.py b/sickgear/providers/speedcd.py index 8f21401f..9964362a 100644 --- a/sickgear/providers/speedcd.py +++ b/sickgear/providers/speedcd.py @@ -25,7 +25,7 @@ from ..helpers import try_int from bs4_parser import BS4Parser from requests.cookies import cookiejar_from_dict -from _23 import filter_list, quote, unquote +from _23 import quote, unquote from six import string_types, iteritems @@ -63,12 +63,12 @@ class SpeedCDProvider(generic.TorrentProvider): self.session.cookies.clear() json = self.get_url(self.urls['login_1'], skip_auth=True, post_data={'username': self.username}, parse_json=True) - resp = filter_list(lambda l: isinstance(l, list), json.get('Fs', [])) + resp = list(filter(lambda l: isinstance(l, list), json.get('Fs', []))) def get_html(_resp): for cur_item in _resp: if isinstance(cur_item, list): - _html = filter_list(lambda s: isinstance(s, string_types) and 'password' in s, cur_item) + _html = list(filter(lambda s: isinstance(s, string_types) and 'password' in s, cur_item)) if not _html: _html = get_html(cur_item) if _html: @@ -128,13 +128,13 @@ class SpeedCDProvider(generic.TorrentProvider): cnt = len(items[mode]) try: - html = filter_list(lambda l: isinstance(l, list), data_json.get('Fs', [])) + html = list(filter(lambda l: isinstance(l, list), data_json.get('Fs', []))) while html: if html and all(isinstance(x, string_types) for x in html): str_lengths = [len(x) for x in html] html = html[str_lengths.index(max(str_lengths))] break - html = filter_list(lambda l: isinstance(l, list), html) + html = list(filter(lambda l: isinstance(l, list), html)) if html and 0 < len(html): html = html[0] diff --git a/sickgear/providers/thepiratebay.py b/sickgear/providers/thepiratebay.py index 51cbd129..bf57db9f 100644 --- a/sickgear/providers/thepiratebay.py +++ b/sickgear/providers/thepiratebay.py @@ -25,7 +25,7 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import b64decodestring, unidecode +from _23 import b64decodestring from six import iteritems @@ -90,7 +90,6 @@ class ThePirateBayProvider(generic.TorrentProvider): for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) if 'Cache' != mode: search_url = self.urls['api'] % search_string diff --git a/sickgear/providers/tokyotoshokan.py b/sickgear/providers/tokyotoshokan.py index 39592d61..338f38f9 100644 --- a/sickgear/providers/tokyotoshokan.py +++ b/sickgear/providers/tokyotoshokan.py @@ -22,7 +22,7 @@ from .. 
import show_name_helpers, tvcache from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import filter_list, map_list, urlencode +from _23 import urlencode from six import iteritems @@ -78,10 +78,10 @@ class TokyoToshokanProvider(generic.TorrentProvider): info = top.find('td', class_='desc-top') title = info and re.sub(r'[ .]{2,}', '.', info.get_text().strip()) - links = info and map_list(lambda l: l.get('href', ''), info.find_all('a')) or None + links = info and list(map(lambda l: l.get('href', ''), info.find_all('a'))) or None download_url = self._link( - (filter_list(lambda l: 'magnet:' in l, links) - or filter_list(lambda l: not re.search(r'(magnet:|\.se).+', l), links))[0]) + (list(filter(lambda l: 'magnet:' in l, links)) + or list(filter(lambda l: not re.search(r'(magnet:|\.se).+', l), links)))[0]) except (AttributeError, TypeError, ValueError, IndexError): continue diff --git a/sickgear/providers/torlock.py b/sickgear/providers/torlock.py index 52fa16b8..79374449 100644 --- a/sickgear/providers/torlock.py +++ b/sickgear/providers/torlock.py @@ -23,7 +23,7 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import b64decodestring, quote_plus, unidecode +from _23 import b64decodestring, quote_plus from six import iteritems @@ -66,8 +66,6 @@ class TorLockProvider(generic.TorrentProvider): for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) - search_url = self.urls['browse'] if 'Cache' == mode \ else self.urls['search'] % (quote_plus(search_string).replace('+', '-')) diff --git a/sickgear/providers/torrenting.py b/sickgear/providers/torrenting.py index cf17d82e..0870d459 100644 --- a/sickgear/providers/torrenting.py +++ b/sickgear/providers/torrenting.py @@ -23,7 +23,6 @@ from .. import logger from ..helpers import try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -67,7 +66,6 @@ class TorrentingProvider(generic.TorrentProvider): 'get': 'download'})]) for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(search_string) search_url = self.urls['search'] % (self._categories_string(), search_string) html = self.get_url(search_url) diff --git a/sickgear/providers/torrentleech.py b/sickgear/providers/torrentleech.py index f65a3efb..148353f9 100644 --- a/sickgear/providers/torrentleech.py +++ b/sickgear/providers/torrentleech.py @@ -21,8 +21,7 @@ import re from . 
import generic from ..helpers import anon_url, try_int -from _23 import unidecode -from six import iteritems, PY2 +from six import iteritems class TorrentLeechProvider(generic.TorrentProvider): @@ -66,7 +65,7 @@ class TorrentLeechProvider(generic.TorrentProvider): for page in range((3, 5)['Cache' == mode])[1:]: urls[-1] += [self.urls[('search', 'browse')['Cache' == mode]] % { 'cats': self._categories_string(mode, '', ','), - 'query': unidecode(search_string) or search_string, + 'query': search_string, 'x': '%spage/%s' % (('facets/tags:FREELEECH/', '')[not self.freeleech], page) }] results += self._search_urls(mode, last_recent_search, urls) @@ -125,8 +124,7 @@ class TorrentLeechProvider(generic.TorrentProvider): download_url = None if dl and dl_id: # noinspection PyUnresolvedReferences - download_url = self._link('download/%s/%s' % (dl_id, dl), - url_quote=PY2 and isinstance(dl, unicode) or None) + download_url = self._link('download/%s/%s' % (dl_id, dl)) except (BaseException, Exception): continue diff --git a/sickgear/providers/tvchaosuk.py b/sickgear/providers/tvchaosuk.py index 244759cb..8897cf92 100644 --- a/sickgear/providers/tvchaosuk.py +++ b/sickgear/providers/tvchaosuk.py @@ -27,7 +27,7 @@ from ..helpers import try_int from bs4_parser import BS4Parser from dateutil.parser import parse -from _23 import unidecode, unquote_plus +from _23 import unquote_plus from six import iteritems @@ -80,7 +80,7 @@ class TVChaosUKProvider(generic.TorrentProvider): 'info': r'/torrents?/(?P(?P\d{2,})[^"]*)', 'get': 'download'})]) for mode in search_params: for search_string in search_params[mode]: - search_string = unidecode(unquote_plus(search_string)) + search_string = unquote_plus(search_string) vals = [i for i in range(5, 16)] random.SystemRandom().shuffle(vals) diff --git a/sickgear/providers/xspeeds.py b/sickgear/providers/xspeeds.py index 4b11a356..e500b438 100644 --- a/sickgear/providers/xspeeds.py +++ b/sickgear/providers/xspeeds.py @@ -25,7 +25,6 @@ from .. 
import logger from ..helpers import has_anime, try_int from bs4_parser import BS4Parser -from _23 import unidecode from six import iteritems @@ -70,7 +69,6 @@ class XspeedsProvider(generic.TorrentProvider): for search_string in search_params[mode]: search_string = search_string.replace(u'£', '%') search_string = re.sub(r'[\s.]+', '%', search_string) - search_string = unidecode(search_string) kwargs = dict(post_data={'keywords': search_string, 'do': 'quick_sort', 'page': '0', 'category': '0', 'search_type': 't_name', 'sort': 'added', diff --git a/sickgear/scene_exceptions.py b/sickgear/scene_exceptions.py index a9fa0afa..9aa9591d 100644 --- a/sickgear/scene_exceptions.py +++ b/sickgear/scene_exceptions.py @@ -35,8 +35,8 @@ from .sgdatetime import timestamp_near import lib.rarfile.rarfile as rarfile -from _23 import filter_iter, list_range, map_iter -from six import iteritems, PY2, text_type +from _23 import list_range +from six import iteritems, text_type # noinspection PyUnreachableCode if False: @@ -303,7 +303,7 @@ def retrieve_exceptions(): list(cur_tvid_prodid))] # if this exception isn't already in the DB then add it - for cur_exception_dict in filter_iter(lambda e: e not in existing_exceptions, exception_dict[cur_tvid_prodid]): + for cur_exception_dict in filter(lambda e: e not in existing_exceptions, exception_dict[cur_tvid_prodid]): try: cur_exception, cur_season = next(iteritems(cur_exception_dict)) except (BaseException, Exception): @@ -311,9 +311,6 @@ def retrieve_exceptions(): logger.log(traceback.format_exc(), logger.ERROR) continue - if PY2 and not isinstance(cur_exception, text_type): - cur_exception = text_type(cur_exception, 'utf-8', 'replace') - cl.append(['INSERT INTO scene_exceptions' ' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)', list(cur_tvid_prodid) + [cur_exception, cur_season]]) @@ -368,9 +365,6 @@ def update_scene_exceptions(tvid, prodid, scene_exceptions): exceptionsCache[(tvid, prodid)][cur_season].append(cur_exception) - if PY2 and not isinstance(cur_exception, text_type): - cur_exception = text_type(cur_exception, 'utf-8', 'replace') - my_db.action('INSERT INTO scene_exceptions' ' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)', [tvid, prodid, cur_exception, cur_season]) @@ -489,7 +483,7 @@ def _anidb_exceptions_fetcher(): if should_refresh('anidb'): logger.log(u'Checking for AniDB scene exception updates') - for cur_show_obj in filter_iter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList): + for cur_show_obj in filter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList): try: anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True) except (BaseException, Exception): @@ -559,8 +553,8 @@ def _xem_get_ids(infosrc_name, xem_origin): % (task.lower() % ('', 's'), infosrc_name, url), logger.ERROR) else: if 'success' == parsed_json.get('result', '') and 'data' in parsed_json: - xem_ids = list(set(filter_iter(lambda prodid: 0 < prodid, - map_iter(lambda pid: helpers.try_int(pid), parsed_json['data'])))) + xem_ids = list(set(filter(lambda prodid: 0 < prodid, + map(lambda pid: helpers.try_int(pid), parsed_json['data'])))) if 0 == len(xem_ids): logger.log(u'Failed %s %s, no data items parsed from URL: %s' % (task.lower() % ('', 's'), infosrc_name, url), logger.WARNING) diff --git a/sickgear/scene_numbering.py b/sickgear/scene_numbering.py index 8bfa2cb7..a93d4776 100644 --- a/sickgear/scene_numbering.py +++ b/sickgear/scene_numbering.py @@ -32,8 +32,6 @@ from .helpers 
import try_int from .scene_exceptions import xem_ids_list from .sgdatetime import timestamp_near -from _23 import filter_iter, map_list - # noinspection PyUnreachableCode if False: from typing import Dict, List, Optional, Tuple, Union @@ -718,8 +716,8 @@ def _get_absolute_numbering_for_show(tbl, tvid, prodid): """ % (tbl, ('indexer_id', 'showid')['tv_episodes' == tbl]), [int(tvid), int(prodid)]) for cur_row in sql_result: - season, episode, abs_num = map_list(lambda x: try_int(cur_row[x], None), - ('season', 'episode', 'absolute_number')) + season, episode, abs_num = list(map(lambda x: try_int(cur_row[x], None), + ('season', 'episode', 'absolute_number'))) if None is season and None is episode and None is not abs_num: season, episode, _ = _get_sea(tvid, prodid, absolute_number=abs_num) @@ -815,7 +813,7 @@ def xem_refresh(tvid, prodid, force=False): return if 'success' in parsed_json['result']: - cl = map_list(lambda entry: [ + cl = list(map(lambda entry: [ """ UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? @@ -824,7 +822,7 @@ def xem_refresh(tvid, prodid, force=False): for v in ('season', 'episode', 'absolute')] + [tvid, prodid] + [entry.get(xem_origin).get(v) for v in ('season', 'episode')] - ], filter_iter(lambda x: 'scene' in x, parsed_json['data'])) + ], filter(lambda x: 'scene' in x, parsed_json['data']))) if 0 < len(cl): my_db = db.DBConnection() diff --git a/sickgear/search.py b/sickgear/search.py index 898f1f36..c7609512 100644 --- a/sickgear/search.py +++ b/sickgear/search.py @@ -34,7 +34,6 @@ from .common import DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER, MULTI_ from .providers.generic import GenericProvider from .tv import TVEpisode, TVShow -from _23 import filter_list, filter_iter, list_values from six import iteritems, itervalues, string_types # noinspection PyUnreachableCode @@ -590,7 +589,7 @@ def search_for_needed_episodes(ep_obj_list): orig_thread_name = threading.current_thread().name - providers = filter_list(lambda x: x.is_active() and x.enable_recentsearch, sickgear.providers.sortedProviderList()) + providers = list(filter(lambda x: x.is_active() and x.enable_recentsearch, sickgear.providers.sortedProviderList())) for cur_provider in providers: threading.current_thread().name = '%s :: [%s]' % (orig_thread_name, cur_provider.name) @@ -646,7 +645,7 @@ def search_for_needed_episodes(ep_obj_list): logger.log('Failed recent search of %s enabled provider%s. More info in debug log.' 
% ( len(providers), helpers.maybe_plural(providers)), logger.ERROR) - return list_values(found_results) + return list(found_results.values()) def can_reject(release_name): @@ -738,10 +737,10 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m # make a list of all the results for this provider for cur_search_result in search_result_list: # skip non-tv crap - search_result_list[cur_search_result] = filter_list( + search_result_list[cur_search_result] = list(filter( lambda ep_item: ep_item.show_obj == show_obj and show_name_helpers.pass_wordlist_checks( ep_item.name, parse=False, indexer_lookup=False, show_obj=ep_item.show_obj), - search_result_list[cur_search_result]) + search_result_list[cur_search_result])) if cur_search_result in provider_results: provider_results[cur_search_result] += search_result_list[cur_search_result] @@ -941,7 +940,7 @@ def search_providers( # if not, break it apart and add them as the lowest priority results individual_results = nzbSplitter.splitResult(best_season_result) - for cur_result in filter_iter( + for cur_result in filter( lambda r: r.show_obj == show_obj and show_name_helpers.pass_wordlist_checks( r.name, parse=False, indexer_lookup=False, show_obj=r.show_obj), individual_results): ep_num = None diff --git a/sickgear/search_backlog.py b/sickgear/search_backlog.py index 904d54a9..eb659770 100644 --- a/sickgear/search_backlog.py +++ b/sickgear/search_backlog.py @@ -28,7 +28,6 @@ from .search import wanted_episodes from .sgdatetime import SGDatetime, timestamp_near from .tv import TVidProdid, TVEpisode, TVShow -from _23 import filter_list, map_iter, map_list from six import iteritems, itervalues, moves # noinspection PyUnreachableCode @@ -212,7 +211,7 @@ class BacklogSearcher(object): any_torrent_enabled = continued_backlog = False if not force and standard_backlog and (datetime.datetime.now() - datetime.datetime.fromtimestamp( self._get_last_runtime())) < datetime.timedelta(hours=23): - any_torrent_enabled = any(map_iter( + any_torrent_enabled = any(map( lambda x: x.is_active() and getattr(x, 'enable_backlog', None) and GenericProvider.TORRENT == x.providerType, sickgear.providers.sortedProviderList())) @@ -291,8 +290,8 @@ class BacklogSearcher(object): if not runparts and parts: runparts = parts[0] - wanted_list = filter_list( - lambda wi: wi and next(itervalues(wi))[0].show_obj.tvid_prodid in runparts, wanted_list) + wanted_list = list(filter( + lambda wi: wi and next(itervalues(wi))[0].show_obj.tvid_prodid in runparts, wanted_list)) limited_wanted_list = [] if standard_backlog and not any_torrent_enabled and runparts: @@ -314,8 +313,8 @@ class BacklogSearcher(object): for i, l in enumerate(parts): if 0 == i: continue - cl += map_list(lambda m: ['INSERT INTO backlogparts (part, indexer, indexerid) VALUES (?,?,?)', - [i + 1] + TVidProdid(m).list], l) + cl += list(map(lambda m: ['INSERT INTO backlogparts (part, indexer, indexerid) VALUES (?,?,?)', + [i + 1] + TVidProdid(m).list], l)) if 0 < len(cl): my_db.mass_action(cl) diff --git a/sickgear/search_queue.py b/sickgear/search_queue.py index 36f804dd..62844ac8 100644 --- a/sickgear/search_queue.py +++ b/sickgear/search_queue.py @@ -34,8 +34,6 @@ from .classes import Proper, SimpleNamespace from .search import wanted_episodes, get_aired_in_season, set_wanted_aired from .tv import TVEpisode -from _23 import filter_list - # noinspection PyUnreachableCode if False: from typing import Any, AnyStr, Dict, List, Optional, Union @@ -520,8 +518,8 @@ class 
RecentSearchQueueItem(generic_queue.QueueItem): orig_thread_name = threading.current_thread().name threads = [] - providers = filter_list(lambda x: x.is_active() and x.enable_recentsearch, - sickgear.providers.sortedProviderList()) + providers = list(filter(lambda x: x.is_active() and x.enable_recentsearch, + sickgear.providers.sortedProviderList())) for cur_provider in providers: if not cur_provider.cache.should_update(): continue diff --git a/sickgear/sgdatetime.py b/sickgear/sgdatetime.py index 86bb84b5..043bedb0 100644 --- a/sickgear/sgdatetime.py +++ b/sickgear/sgdatetime.py @@ -23,7 +23,7 @@ import sys import sickgear from dateutil import tz -from six import integer_types, PY2, string_types +from six import integer_types, string_types # noinspection PyUnreachableCode if False: @@ -283,21 +283,14 @@ class SGDatetime(datetime.datetime): return (default, timestamp)[isinstance(timestamp, (float, integer_types))] -if PY2: - """ - Use `timestamp_near` for a timezone aware UTC timestamp in the near future or recent past. - - Under py3, using the faster variable assigned cpython callable, so py2 is set up to mimic the signature types. - Note: the py3 callable is limited to datetime.datetime and does not work with datetime.date. - """ - def _py2timestamp(dt=None): +# noinspection PyUnreachableCode +if False: + # just to trick pycharm in correct type detection + def timestamp_near(d_t): # type: (datetime.datetime) -> float - try: - import time - return int(time.mktime(dt.timetuple())) - except (BaseException, Exception): - return 0 - timestamp_near = _py2timestamp # type: Callable[[datetime.datetime], float] -else: - # py3 native timestamp uses milliseconds - timestamp_near = datetime.datetime.timestamp # type: Callable[[datetime.datetime], float] + pass + + +# py3 native timestamp uses milliseconds +# noinspection PyRedeclaration +timestamp_near = datetime.datetime.timestamp diff --git a/sickgear/show_name_helpers.py b/sickgear/show_name_helpers.py index a18e5878..b0c00027 100644 --- a/sickgear/show_name_helpers.py +++ b/sickgear/show_name_helpers.py @@ -28,7 +28,7 @@ from .name_parser.parser import InvalidNameException, InvalidShowException, Name from .scene_exceptions import get_scene_exceptions from sg_helpers import scantree -from _23 import map_list, quote_plus +from _23 import quote_plus from six import iterkeys, itervalues # noinspection PyUnreachableCode @@ -237,7 +237,7 @@ def get_show_names_all_possible(show_obj, season=-1, scenify=True, spacer='.', f show_names = list(set( all_possible_show_names(show_obj, season=season, force_anime=force_anime))) # type: List[AnyStr] if scenify: - show_names = map_list(sanitize_scene_name, show_names) + show_names = list(map(sanitize_scene_name, show_names)) return url_encode(show_names, spacer) diff --git a/sickgear/trakt_helpers.py b/sickgear/trakt_helpers.py index acbbb398..b1a8314f 100644 --- a/sickgear/trakt_helpers.py +++ b/sickgear/trakt_helpers.py @@ -5,7 +5,7 @@ import re import sickgear from .helpers import try_int -from _23 import decode_bytes, decode_str, list_items +from _23 import decode_bytes, decode_str from six import iteritems, text_type @@ -51,7 +51,7 @@ def build_config_string(config): :param config: dicts of Trakt account id, parent location :return: string csv of parsed config kwargs for config file """ - return text_type(list_items(config)) + return text_type(list(config.items())) def trakt_collection_remove_account(account_id): diff --git a/sickgear/tv.py b/sickgear/tv.py index af779dbd..7edb23d2 100644 --- a/sickgear/tv.py 
+++ b/sickgear/tv.py @@ -63,8 +63,7 @@ from lib.tvinfo_base import RoleTypes, TVINFO_FACEBOOK, TVINFO_INSTAGRAM, TVINFO from lib.tvinfo_base.exceptions import * from sg_helpers import calc_age, int_to_time, remove_file_perm, time_to_int -from _23 import filter_iter, filter_list, list_keys -from six import integer_types, iteritems, itervalues, moves, PY2, string_types +from six import integer_types, iteritems, itervalues, moves, string_types # noinspection PyUnreachableCode if False: @@ -172,9 +171,9 @@ class TVidProdid(object): if coreid_warnings: logger.log('%s\n' % pre_msg + '|>%s^-- Note: Bootstrap & Tornado startup functions stripped from traceback log.' % - '|>'.join(filter_iter(lambda text: not re.search(r'(?i)bootstrap|traceback\.' - r'format_stack|pydevd|tornado' - r'|webserveinit', text), + '|>'.join(filter(lambda text: not re.search(r'(?i)bootstrap|traceback\.' + r'format_stack|pydevd|tornado' + r'|webserveinit', text), traceback.format_stack(inspect.currentframe())))) except IndexError: pass @@ -379,7 +378,7 @@ class Person(Referential): akas=None, # type: Set[AnyStr] character_obj=None, # type: Character tmp_character_obj=None # type: Character - ): # type: (...) -> Person + ): super(Person, self).__init__(sid) @@ -789,6 +788,8 @@ class Person(Referential): if None is not rp: if confirmed_on_src: for i in (TVINFO_TRAKT, TVINFO_IMDB, TVINFO_TMDB, TVINFO_TVMAZE, TVINFO_TVDB): + if not rp.ids.get(i): + continue # in case it's the current source use it's id and lock if from being changed if cur_tv_info_src == i and rp.ids.get(i): source_confirmed[i] = True @@ -803,6 +804,8 @@ class Person(Referential): self.dirty_ids = True for i in (TVINFO_INSTAGRAM, TVINFO_TWITTER, TVINFO_FACEBOOK, TVINFO_WIKIPEDIA): + if not rp.social_ids.get(i): + continue if rp.social_ids.get(i) and not self.ids.get(i) or \ (rp.social_ids.get(i) and rp.social_ids.get(i) != self.ids.get(i)): self.ids[i] = rp.social_ids[i] @@ -892,11 +895,12 @@ class Person(Referential): ] if force or self.dirty_ids: for s, v in iteritems(self.ids): - cl.extend([ - ['UPDATE person_ids SET src_id = ? WHERE person_id = ? AND src = ?', [v, self.id, s]], - ["INSERT INTO person_ids (src, src_id, person_id) SELECT %s, '%s', %s WHERE changes() == 0" - % (s, v, self.id)] - ]) + if v: + cl.extend([ + ['UPDATE person_ids SET src_id = ? WHERE person_id = ? 
AND src = ?', [v, self.id, s]], + ["INSERT INTO person_ids (src, src_id, person_id) SELECT %s, '%s', %s WHERE changes() == 0" + % (s, v, self.id)] + ]) if cl: r_id = my_db.mass_action(cl) if r_id and r_id[-1:][0]: @@ -3152,9 +3156,9 @@ class TVShow(TVShowBase): if isinstance(imdb_tv.get('numberOfEpisodes'), (int, string_types)): imdb_info['episode_count'] = try_int(imdb_tv.get('numberOfEpisodes'), 1) if isinstance(imdb_tv.get('genres'), (list, tuple)): - imdb_info['genres'] = '|'.join(filter_iter(lambda _v: _v, imdb_tv.get('genres'))) + imdb_info['genres'] = '|'.join(filter(lambda _v: _v, imdb_tv.get('genres'))) if isinstance(imdb_tv.get('origins'), list): - imdb_info['country_codes'] = '|'.join(filter_iter(lambda _v: _v, imdb_tv.get('origins'))) + imdb_info['country_codes'] = '|'.join(filter(lambda _v: _v, imdb_tv.get('origins'))) # certificate if isinstance(imdb_certificates.get('certificates'), dict): @@ -3256,7 +3260,7 @@ class TVShow(TVShowBase): action = ('delete', 'trash')[sickgear.TRASH_REMOVE_SHOW] # remove self from show list - sickgear.showList = filter_list(lambda so: so.tvid_prodid != self.tvid_prodid, sickgear.showList) + sickgear.showList = list(filter(lambda so: so.tvid_prodid != self.tvid_prodid, sickgear.showList)) try: del sickgear.showDict[self.sid_int] except (BaseException, Exception): @@ -4220,8 +4224,6 @@ class TVEpisode(TVEpisodeBase): tzinfo = self._show_obj.timezone elif isinstance(self._show_obj.network, string_types) and self._show_obj.network: tzinfo = network_timezones.get_network_timezone(self._show_obj.network) - if PY2: - return SGDatetime.combine(self.airdate, ep_time).replace(tzinfo=tzinfo).timestamp_far() return SGDatetime.combine(self.airdate, ep_time, tzinfo=tzinfo).timestamp_far() return None @@ -4964,7 +4966,7 @@ class TVEpisode(TVEpisodeBase): result_name = pattern # do the replacements - for cur_replacement in sorted(list_keys(replace_map), reverse=True): + for cur_replacement in sorted(list(replace_map), reverse=True): result_name = result_name.replace(cur_replacement, helpers.sanitize_filename(replace_map[cur_replacement])) result_name = result_name.replace(cur_replacement.lower(), helpers.sanitize_filename(replace_map[cur_replacement].lower())) diff --git a/sickgear/tvcache.py b/sickgear/tvcache.py index 6450b15d..16d5d967 100644 --- a/sickgear/tvcache.py +++ b/sickgear/tvcache.py @@ -30,9 +30,6 @@ from .rssfeeds import RSSFeeds from .sgdatetime import timestamp_near from .tv import TVEpisode -from _23 import filter_list, map_iter -from six import PY2, text_type - # noinspection PyUnreachableCode if False: from typing import Any, AnyStr, Dict, List, Tuple, Union @@ -315,7 +312,7 @@ class TVCache(object): if season_number and episode_numbers: # store episodes as a separated string - episode_text = '|%s|' % '|'.join(map_iter(str, episode_numbers)) + episode_text = '|%s|' % '|'.join(map(str, episode_numbers)) # get the current timestamp cur_timestamp = int(timestamp_near(datetime.datetime.now())) @@ -323,9 +320,6 @@ class TVCache(object): # get quality of release quality = parse_result.quality - if PY2 and not isinstance(name, text_type): - name = text_type(name, 'utf-8', 'replace') - # get release group release_group = parse_result.release_group @@ -376,7 +370,7 @@ class TVCache(object): if date: sql += ' AND time >= ' + str(int(time.mktime(date.timetuple()))) - return filter_list(lambda x: x['indexerid'] != 0, my_db.select(sql, [self.providerID])) + return list(filter(lambda x: x['indexerid'] != 0, my_db.select(sql, [self.providerID]))) def 
findNeededEpisodes(self, ep_obj_list, manual_search=False): # type: (Union[TVEpisode, List[TVEpisode]], bool) -> Dict[TVEpisode, SearchResult] diff --git a/sickgear/version_checker.py b/sickgear/version_checker.py index 23609e71..7da64b0e 100644 --- a/sickgear/version_checker.py +++ b/sickgear/version_checker.py @@ -35,7 +35,6 @@ from sg_helpers import cmdline_runner, get_url # noinspection PyUnresolvedReferences from six.moves import urllib from six import string_types -from _23 import list_keys # noinspection PyUnreachableCode if False: @@ -83,7 +82,7 @@ class PackagesUpdater(object): ui.notifications.message(msg) return False - logger.log('Update(s) for %s found %s' % (self.install_type, list_keys(sickgear.UPDATES_TODO))) + logger.log('Update(s) for %s found %s' % (self.install_type, list(sickgear.UPDATES_TODO))) # save updates_todo to config to be loaded after restart sickgear.save_config() diff --git a/sickgear/webapi.py b/sickgear/webapi.py index 691f2c6c..bd0e6807 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -55,8 +55,8 @@ from .tv import TVEpisode, TVShow, TVidProdid from .webserve import AddShows import dateutil.parser -from _23 import decode_str, list_keys, unquote_plus -from six import integer_types, iteritems, iterkeys, PY2, string_types, text_type +from _23 import decode_str, unquote_plus +from six import integer_types, iteritems, iterkeys, string_types, text_type # noinspection PyUnreachableCode if False: @@ -253,8 +253,6 @@ class Api(webserve.BaseHandler): result = function(*ag) return result except Exception as e: - if PY2: - logger.log('traceback: %s' % traceback.format_exc(), logger.ERROR) logger.log(ex(e), logger.ERROR) raise e @@ -1043,7 +1041,7 @@ class CMD_SickGearComingEpisodes(ApiCall): ep['network'] and network_timezones.get_network_timezone(ep['network'], return_name=True)[1]) # remove all field we don't want for api response - for cur_f in list_keys(ep): + for cur_f in list(ep): if cur_f not in [ # fields to preserve 'absolute_number', 'air_by_date', 'airdate', 'airs', 'archive_firstmatch', 'classification', 'data_network', 'data_show_name', diff --git a/sickgear/webserve.py b/sickgear/webserve.py index 6439dfd5..18f14bd9 100644 --- a/sickgear/webserve.py +++ b/sickgear/webserve.py @@ -90,9 +90,9 @@ from lib.api_trakt.exceptions import TraktException, TraktAuthException import lib.rarfile.rarfile as rarfile -from _23 import decode_bytes, decode_str, filter_list, filter_iter, getargspec, list_keys, list_values, \ - map_consume, map_iter, map_list, map_none, ordered_dict, quote_plus, unquote_plus, urlparse -from six import binary_type, integer_types, iteritems, iterkeys, itervalues, moves, PY2, string_types +from _23 import decode_bytes, decode_str, getargspec, \ + map_consume, map_none, quote_plus, unquote_plus, urlparse +from six import binary_type, integer_types, iteritems, iterkeys, itervalues, moves, string_types # noinspection PyUnreachableCode if False: @@ -198,9 +198,7 @@ class RouteHandler(LegacyBaseHandler): return [self.decode_data(d) for d in data] if not isinstance(data, string_types): return data - if not PY2: - return data.encode('latin1').decode('utf-8') - return data.decode('utf-8') + return data.encode('latin1').decode('utf-8') @gen.coroutine def route_method(self, route, use_404=False, limit_route=None, xsrf_filter=True): @@ -240,7 +238,7 @@ class RouteHandler(LegacyBaseHandler): # no filtering for legacy and routes that depend on *args and **kwargs result = yield self.async_call(method, request_kwargs) # method(**request_kwargs) 
else: - filter_kwargs = dict(filter_iter(lambda kv: kv[0] in method_args, iteritems(request_kwargs))) + filter_kwargs = dict(filter(lambda kv: kv[0] in method_args, iteritems(request_kwargs))) result = yield self.async_call(method, filter_kwargs) # method(**filter_kwargs) self.finish(result) @@ -249,8 +247,6 @@ class RouteHandler(LegacyBaseHandler): try: return function(**kw) except (BaseException, Exception) as e: - if PY2: - raise Exception(traceback.format_exc().replace('\n', '
')) raise e def page_not_found(self): @@ -1393,7 +1389,7 @@ r.close() if data: my_db = db.DBConnection(row_type='dict') - media_paths = map_list(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(data)) + media_paths = list(map(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(data))) def chunks(lines, n): for c in range(0, len(lines), n): @@ -1553,13 +1549,13 @@ class Home(MainHandler): index = 0 if 'custom' == sickgear.SHOWLIST_TAGVIEW: for name in sickgear.SHOW_TAGS: - results = filter_list(lambda so: so.tag == name, sickgear.showList) + results = list(filter(lambda so: so.tag == name, sickgear.showList)) if results: t.showlists.append(['container%s' % index, name, results]) index += 1 elif 'anime' == sickgear.SHOWLIST_TAGVIEW: - show_results = filter_list(lambda so: not so.anime, sickgear.showList) - anime_results = filter_list(lambda so: so.anime, sickgear.showList) + show_results = list(filter(lambda so: not so.anime, sickgear.showList)) + anime_results = list(filter(lambda so: so.anime, sickgear.showList)) if show_results: t.showlists.append(['container%s' % index, 'Show List', show_results]) index += 1 @@ -1904,7 +1900,7 @@ class Home(MainHandler): ' AND notify_list != ""', [TVidProdid.glue]) notify_lists = {} - for r in filter_iter(lambda x: x['notify_list'].strip(), rows): + for r in filter(lambda x: x['notify_list'].strip(), rows): # noinspection PyTypeChecker notify_lists[r['tvid_prodid']] = r['notify_list'] @@ -2265,7 +2261,7 @@ class Home(MainHandler): del (ep_counts['totals'][0]) ep_counts['eps_all'] = sum(itervalues(ep_counts['totals'])) - ep_counts['eps_most'] = max(list_values(ep_counts['totals']) + [0]) + ep_counts['eps_most'] = max(list(ep_counts['totals'].values()) + [0]) all_seasons = sorted(iterkeys(ep_counts['totals']), reverse=True) t.lowest_season, t.highest_season = all_seasons and (all_seasons[-1], all_seasons[0]) or (0, 0) @@ -2436,7 +2432,7 @@ class Home(MainHandler): if 'custom' == sickgear.SHOWLIST_TAGVIEW: sorted_show_lists = [] for tag in sickgear.SHOW_TAGS: - results = filter_list(lambda _so: _so.tag == tag, sickgear.showList) + results = list(filter(lambda _so: _so.tag == tag, sickgear.showList)) if results: sorted_show_lists.append([tag, sorted(results, key=lambda x: titler(x.unique_name))]) # handle orphaned shows @@ -2841,7 +2837,7 @@ class Home(MainHandler): errors = [] with show_obj.lock: - show_obj.quality = Quality.combineQualities(map_list(int, any_qualities), map_list(int, best_qualities)) + show_obj.quality = Quality.combineQualities(list(map(int, any_qualities)), list(map(int, best_qualities))) show_obj.upgrade_once = upgrade_once # reversed for now @@ -3371,7 +3367,7 @@ class Home(MainHandler): sickgear.search_queue.remove_old_fifo(sickgear.search_queue.MANUAL_SEARCH_HISTORY) results = sickgear.search_queue.MANUAL_SEARCH_HISTORY - for item in filter_iter(lambda q: hasattr(q, 'segment_ns'), queued): + for item in filter(lambda q: hasattr(q, 'segment_ns'), queued): for ep_ns in item.segment_ns: ep_data, uniq_sxe = self.prepare_episode(ep_ns, 'queued') ep_data_list.append(ep_data) @@ -3387,9 +3383,9 @@ class Home(MainHandler): seen_eps.add(uniq_sxe) episode_params = dict(searchstate='finished', retrystate=True, statusoverview=True) - for item in filter_iter(lambda r: hasattr(r, 'segment_ns') and ( + for item in filter(lambda r: hasattr(r, 'segment_ns') and ( not tvid_prodid or tvid_prodid == str(r.show_ns.tvid_prodid)), results): - for ep_ns in filter_iter( + for ep_ns in filter( lambda e: (e.show_ns.tvid, e.show_ns.prodid, 
e.season, e.episode) not in seen_eps, item.segment_ns): ep_obj = getattr(ep_ns, 'ep_obj', None) if not ep_obj: @@ -3403,7 +3399,7 @@ class Home(MainHandler): ep_data_list.append(ep_data) seen_eps.add(uniq_sxe) - for snatched in filter_iter(lambda s: ((s.tvid, s.prodid, s.season, s.episode) not in seen_eps), + for snatched in filter(lambda s: ((s.tvid, s.prodid, s.season, s.episode) not in seen_eps), item.snatched_eps): ep_obj = getattr(snatched, 'ep_obj', None) if not ep_obj: @@ -3941,12 +3937,12 @@ class AddShows(Home): b_term = decode_str(used_search_term).strip() terms = [] try: - for cur_term in ([], [b_term.encode('utf-8')])[PY2] + [unidecode(b_term), b_term]: + for cur_term in [unidecode(b_term), b_term]: if cur_term not in terms: terms += [cur_term] except (BaseException, Exception): text = used_search_term.strip() - terms = [text if not PY2 else text.encode('utf-8')] + terms = text return set(s for s in set([used_search_term] + terms) if s) @@ -4082,7 +4078,7 @@ class AddShows(Home): for tvid, name in iteritems(sickgear.TVInfoAPI().all_sources)} if TVINFO_TRAKT in results and TVINFO_TVDB in results: - tvdb_ids = list_keys(results[TVINFO_TVDB]) + tvdb_ids = list(results[TVINFO_TVDB]) results[TVINFO_TRAKT] = {k: v for k, v in iteritems(results[TVINFO_TRAKT]) if v['ids'].tvdb not in tvdb_ids} def in_db(tvid, prod_id): @@ -4397,9 +4393,9 @@ class AddShows(Home): t.infosrc = sickgear.TVInfoAPI().search_sources search_tvid = None if use_show_name and 1 == show_name.count(':'): # if colon is found once - search_tvid = filter_list(lambda x: bool(x), + search_tvid = list(filter(lambda x: bool(x), [('%s:' % sickgear.TVInfoAPI(_tvid).config['slug']) in show_name and _tvid - for _tvid, _ in iteritems(t.infosrc)]) + for _tvid, _ in iteritems(t.infosrc)])) search_tvid = 1 == len(search_tvid) and search_tvid[0] t.provided_tvid = search_tvid or int(tvid or sickgear.TVINFO_DEFAULT) t.infosrc_icons = [sickgear.TVInfoAPI(cur_tvid).config.get('icon') for cur_tvid in t.infosrc] @@ -4530,7 +4526,7 @@ class AddShows(Home): def info_anidb(self, ids, show_name): - if not filter_list(lambda tvid_prodid: helpers.find_show_by_id(tvid_prodid), ids.split(' ')): + if not list(filter(lambda tvid_prodid: helpers.find_show_by_id(tvid_prodid), ids.split(' '))): return self.new_show('|'.join(['', '', '', ' '.join([ids, show_name])]), use_show_name=True, is_anime=True) @staticmethod @@ -4617,8 +4613,8 @@ class AddShows(Home): oldest, newest, oldest_dt, newest_dt = None, None, 9999999, 0 show_list = (data or {}).get('list', {}).get('items', {}) - idx_ids = dict(map_iter(lambda so: (so.imdbid, (so.tvid, so.prodid)), - filter_iter(lambda _so: getattr(_so, 'imdbid', None), sickgear.showList))) + idx_ids = dict(map(lambda so: (so.imdbid, (so.tvid, so.prodid)), + filter(lambda _so: getattr(_so, 'imdbid', None), sickgear.showList))) # list_id = (data or {}).get('list', {}).get('id', {}) for row in show_list: @@ -4753,7 +4749,7 @@ class AddShows(Home): show_obj = helpers.find_show_by_id({TVINFO_IMDB: int(ids['imdb'].replace('tt', ''))}, no_mapped_ids=False) - for tvid in filter_iter(lambda _tvid: _tvid == show_obj.tvid, sickgear.TVInfoAPI().search_sources): + for tvid in filter(lambda _tvid: _tvid == show_obj.tvid, sickgear.TVInfoAPI().search_sources): infosrc_slug, infosrc_url = (sickgear.TVInfoAPI(tvid).config[x] for x in ('slug', 'show_url')) filtered[-1]['ids'][infosrc_slug] = show_obj.prodid @@ -5114,7 +5110,7 @@ class AddShows(Home): def info_trakt(self, ids, show_name): - if not filter_list(lambda tvid_prodid: 
helpers.find_show_by_id(tvid_prodid), ids.split(' ')): + if not list(filter(lambda tvid_prodid: helpers.find_show_by_id(tvid_prodid), ids.split(' '))): return self.new_show('|'.join(['', '', '', ' '.join([ids, show_name])]), use_show_name=True) def ne_default(self): @@ -5428,7 +5424,7 @@ class AddShows(Home): # noinspection PyUnusedLocal def info_tvmaze(self, ids, show_name): - if not filter_list(lambda tvid_prodid: helpers.find_show_by_id(tvid_prodid), ids.split(' ')): + if not list(filter(lambda tvid_prodid: helpers.find_show_by_id(tvid_prodid), ids.split(' '))): return self.new_show('|'.join(['', '', '', ' '.join([ids, show_name])]), use_show_name=True) def tvc_default(self): @@ -5727,7 +5723,7 @@ class AddShows(Home): dt_ordinal = 0 dt_string = '' - date_tags = filter_list(lambda t: t.find('span'), row.find_all('div', class_='clamp-details')) + date_tags = list(filter(lambda t: t.find('span'), row.find_all('div', class_='clamp-details'))) if date_tags: dt = dateutil.parser.parse(date_tags[0].get_text().strip()) dt_ordinal = dt.toordinal() @@ -5840,11 +5836,11 @@ class AddShows(Home): tvid_prodid_list = [] # first, process known ids - for tvid, infosrc_slug in filter_iter( + for tvid, infosrc_slug in filter( lambda tvid_slug: item['ids'].get(tvid_slug[1]) and not sickgear.TVInfoAPI(tvid_slug[0]).config.get('defunct'), - map_iter(lambda _tvid: (_tvid, sickgear.TVInfoAPI(_tvid).config['slug']), - iterkeys(sickgear.TVInfoAPI().all_sources))): + map(lambda _tvid: (_tvid, sickgear.TVInfoAPI(_tvid).config['slug']), + iterkeys(sickgear.TVInfoAPI().all_sources))): try: src_id = item['ids'][infosrc_slug] tvid_prodid_list += ['%s:%s' % (infosrc_slug, src_id)] @@ -5899,7 +5895,7 @@ class AddShows(Home): known.append(item['show_id']) t.all_shows.append(item) - if any(filter_iter(lambda tp: tp in sickgear.BROWSELIST_HIDDEN, tvid_prodid_list)): + if any(filter(lambda tp: tp in sickgear.BROWSELIST_HIDDEN, tvid_prodid_list)): item['hide'] = True t.num_hidden += 1 @@ -6035,7 +6031,7 @@ class AddShows(Home): any_qualities = [any_qualities] if type(best_qualities) != list: best_qualities = [best_qualities] - new_quality = Quality.combineQualities(map_list(int, any_qualities), map_list(int, best_qualities)) + new_quality = Quality.combineQualities(list(map(int, any_qualities)), list(map(int, best_qualities))) upgrade_once = config.checkbox_to_value(upgrade_once) wanted_begin = config.minimax(wanted_begin, 0, -1, 10) @@ -6403,8 +6399,7 @@ class Manage(MainHandler): ' AND season != 0' ' AND indexer = ? AND showid = ?', status_list + tvid_prodid_list) - what = (sql_result and '|'.join(map_iter(lambda r: '%sx%s' % (r['season'], r['episode']), - sql_result)) + what = (sql_result and '|'.join(map(lambda r: '%sx%s' % (r['season'], r['episode']), sql_result)) or None) to = new_status @@ -6562,7 +6557,8 @@ class Manage(MainHandler): ' WHERE indexer = ? AND showid = ?' 
' AND season != 0 AND status LIKE \'%4\'', TVidProdid(cur_tvid_prodid).list) - to_download[cur_tvid_prodid] = map_list(lambda x: '%sx%s' % (x['season'], x['episode']), sql_result) + to_download[cur_tvid_prodid] = list(map(lambda x: '%sx%s' % (x['season'], x['episode']), + sql_result)) for epResult in to_download[cur_tvid_prodid]: season, episode = epResult.split('x') @@ -7461,12 +7457,12 @@ class History(MainHandler): elif 'failures' in sickgear.HISTORY_LAYOUT: - t.provider_fail_stats = filter_list(lambda stat: len(stat['fails']), [ + t.provider_fail_stats = list(filter(lambda stat: len(stat['fails']), [ dict(name=p.name, id=p.get_id(), active=p.is_active(), prov_img=p.image_name(), prov_id=p.get_id(), # 2020.03.17 legacy var, remove at future date fails=p.fails.fails_sorted, next_try=p.get_next_try_time, has_limit=getattr(p, 'has_limit', False), tmr_limit_time=p.tmr_limit_time) - for p in sickgear.providerList + sickgear.newznabProviderList]) + for p in sickgear.providerList + sickgear.newznabProviderList])) t.provider_fail_cnt = len([p for p in t.provider_fail_stats if len(p['fails'])]) t.provider_fails = t.provider_fail_cnt # 2020.03.17 legacy var, remove at future date @@ -7500,11 +7496,11 @@ class History(MainHandler): return result with sg_helpers.DOMAIN_FAILURES.lock: - t.domain_fail_stats = filter_list(lambda stat: len(stat['fails']), [ + t.domain_fail_stats = list(filter(lambda stat: len(stat['fails']), [ dict(name=k, id=sickgear.GenericProvider.make_id(k), img=img(k), cls=img(k, True), fails=v.fails_sorted, next_try=v.get_next_try_time, has_limit=getattr(v, 'has_limit', False), tmr_limit_time=v.tmr_limit_time) - for k, v in iteritems(sg_helpers.DOMAIN_FAILURES.domain_list)]) + for k, v in iteritems(sg_helpers.DOMAIN_FAILURES.domain_list)])) t.domain_fail_cnt = len([d for d in t.domain_fail_stats if len(d['fails'])]) @@ -7658,7 +7654,7 @@ class History(MainHandler): ParentId=folder_id, Filters='IsPlayed', format='json'), timeout=10, parse_json=True) or {} - for d in filter_iter(lambda item: 'Episode' == item.get('Type', ''), items.get('Items')): + for d in filter(lambda item: 'Episode' == item.get('Type', ''), items.get('Items')): try: root_dir_found = False path_file = d.get('Path') @@ -7700,11 +7696,11 @@ class History(MainHandler): if states: # Prune user removed items that are no longer being returned by API - media_paths = map_list(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(states)) + media_paths = list(map(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(states))) sql = 'FROM tv_episodes_watched WHERE hide=1 AND label LIKE "%%{Emby}"' my_db = db.DBConnection(row_type='dict') files = my_db.select('SELECT location %s' % sql) - for i in filter_iter(lambda f: os.path.basename(f['location']) not in media_paths, files): + for i in filter(lambda f: os.path.basename(f['location']) not in media_paths, files): loc = i.get('location') if loc: my_db.select('DELETE %s AND location="%s"' % (sql, loc)) @@ -7769,11 +7765,11 @@ class History(MainHandler): if states: # Prune user removed items that are no longer being returned by API - media_paths = map_list(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(states)) + media_paths = list(map(lambda arg: os.path.basename(arg[1]['path_file']), iteritems(states))) sql = 'FROM tv_episodes_watched WHERE hide=1 AND label LIKE "%%{Plex}"' my_db = db.DBConnection(row_type='dict') files = my_db.select('SELECT location %s' % sql) - for i in filter_iter(lambda f: os.path.basename(f['location']) not in media_paths, 
files): + for i in filter(lambda f: os.path.basename(f['location']) not in media_paths, files): loc = i.get('location') if loc: my_db.select('DELETE %s AND location="%s"' % (sql, loc)) @@ -7844,7 +7840,7 @@ class History(MainHandler): for cur_result in sql_result: show_obj = helpers.find_show_by_id(tvid_prodid_dict) ep_obj = show_obj.get_episode(cur_result['season'], cur_result['episode']) - for n in filter_iter(lambda x: x.name.lower() in ('emby', 'kodi', 'plex'), + for n in filter(lambda x: x.name.lower() in ('emby', 'kodi', 'plex'), notifiers.NotifierFactory().get_enabled()): if 'PLEX' == n.name: if updating: @@ -7966,7 +7962,7 @@ class ConfigGeneral(Config): seasons = [-1] + seasons[0:-1] # bubble -1 # prepare a seasonal ordered dict for output - alts = ordered_dict([(season, {}) for season in seasons]) + alts = dict([(season, {}) for season in seasons]) # add original show name show_obj = sickgear.helpers.find_show_by_id(tvid_prodid, no_mapped_ids=True) @@ -8057,8 +8053,8 @@ class ConfigGeneral(Config): any_qualities = ([], any_qualities.split(','))[any(any_qualities)] best_qualities = ([], best_qualities.split(','))[any(best_qualities)] - sickgear.QUALITY_DEFAULT = int(Quality.combineQualities(map_list(int, any_qualities), - map_list(int, best_qualities))) + sickgear.QUALITY_DEFAULT = int(Quality.combineQualities(list(map(int, any_qualities)), + list(map(int, best_qualities)))) sickgear.WANTED_BEGIN_DEFAULT = config.minimax(default_wanted_begin, 0, -1, 10) sickgear.WANTED_LATEST_DEFAULT = config.minimax(default_wanted_latest, 0, -1, 10) sickgear.SHOW_TAG_DEFAULT = default_tag @@ -8258,7 +8254,7 @@ class ConfigGeneral(Config): sickgear.WEB_IPV64 = config.checkbox_to_value(web_ipv64) sickgear.HANDLE_REVERSE_PROXY = config.checkbox_to_value(handle_reverse_proxy) sickgear.SEND_SECURITY_HEADERS = config.checkbox_to_value(send_security_headers) - hosts = ','.join(filter_iter(lambda name: not helpers.re_valid_hostname(with_allowed=False).match(name), + hosts = ','.join(filter(lambda name: not helpers.re_valid_hostname(with_allowed=False).match(name), config.clean_hosts(allowed_hosts).split(','))) if not hosts or self.request.host_name in hosts: sickgear.ALLOWED_HOSTS = hosts @@ -8811,7 +8807,7 @@ class ConfigProviders(Config): [k for k in nzb_src.may_filter if config.checkbox_to_value(kwargs.get('%s_filter_%s' % (cur_id, k)))]) - for attr in filter_iter(lambda a: hasattr(nzb_src, a), [ + for attr in filter(lambda a: hasattr(nzb_src, a), [ 'search_fallback', 'enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog', 'scene_only', 'scene_loose', 'scene_loose_active', 'scene_rej_nuked', 'scene_nuked_active' ]): @@ -8925,12 +8921,12 @@ class ConfigProviders(Config): elif not starify(key, True): setattr(torrent_src, attr, key) - for attr in filter_iter(lambda a: hasattr(torrent_src, a), [ + for attr in filter(lambda a: hasattr(torrent_src, a), [ 'username', 'uid', '_seed_ratio', 'scene_or_contain' ]): setattr(torrent_src, attr, str(kwargs.get(src_id_prefix + attr.replace('_seed_', ''), '')).strip()) - for attr in filter_iter(lambda a: hasattr(torrent_src, a), [ + for attr in filter(lambda a: hasattr(torrent_src, a), [ 'minseed', 'minleech', 'seed_time' ]): setattr(torrent_src, attr, config.to_int(str(kwargs.get(src_id_prefix + attr, '')).strip())) @@ -8941,7 +8937,7 @@ class ConfigProviders(Config): [k for k in getattr(torrent_src, 'may_filter', 'nop') if config.checkbox_to_value(kwargs.get('%sfilter_%s' % (src_id_prefix, k)))]) - for attr in filter_iter(lambda a: hasattr(torrent_src, 
a), [ + for attr in filter(lambda a: hasattr(torrent_src, a), [ 'confirmed', 'freeleech', 'reject_m2ts', 'use_after_get_data', 'enable_recentsearch', 'enable_backlog', 'search_fallback', 'enable_scheduled_backlog', 'scene_only', 'scene_loose', 'scene_loose_active', @@ -8949,7 +8945,7 @@ class ConfigProviders(Config): ]): setattr(torrent_src, attr, config.checkbox_to_value(kwargs.get(src_id_prefix + attr))) - for attr, default in filter_iter(lambda arg: hasattr(torrent_src, arg[0]), [ + for attr, default in filter(lambda arg: hasattr(torrent_src, arg[0]), [ ('search_mode', 'eponly'), ]): setattr(torrent_src, attr, str(kwargs.get(src_id_prefix + attr) or default).strip()) @@ -8973,10 +8969,10 @@ class ConfigProviders(Config): setattr(nzb_src, attr, config.checkbox_to_value(kwargs.get(src_id_prefix + attr)) or not getattr(nzb_src, 'supports_backlog', True)) - for attr in filter_iter(lambda a: hasattr(nzb_src, a), - ['search_fallback', 'enable_backlog', 'enable_scheduled_backlog', - 'scene_only', 'scene_loose', 'scene_loose_active', - 'scene_rej_nuked', 'scene_nuked_active']): + for attr in filter(lambda a: hasattr(nzb_src, a), + ['search_fallback', 'enable_backlog', 'enable_scheduled_backlog', + 'scene_only', 'scene_loose', 'scene_loose_active', + 'scene_rej_nuked', 'scene_nuked_active']): setattr(nzb_src, attr, config.checkbox_to_value(kwargs.get(src_id_prefix + attr))) for (attr, default) in [('scene_or_contain', ''), ('search_mode', 'eponly')]: diff --git a/sickgear/webserveInit.py b/sickgear/webserveInit.py index 8fd7b086..8f1e4ad5 100644 --- a/sickgear/webserveInit.py +++ b/sickgear/webserveInit.py @@ -14,7 +14,6 @@ from .helpers import create_https_certificates, re_valid_hostname import sickgear from _23 import PY38 -from six import PY2 # noinspection PyUnreachableCode if False: @@ -255,14 +254,13 @@ class WebServer(threading.Thread): logger.log(u'Starting SickGear on %s://%s:%s/' % (protocol, self.options['host'], self.options['port'])) # python 3 needs to start event loop first - if not PY2: - import asyncio - if 'win32' == platform and PY38: - # noinspection PyUnresolvedReferences - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) - asyncio.set_event_loop(asyncio.new_event_loop()) - from tornado.platform.asyncio import AnyThreadEventLoopPolicy - asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + import asyncio + if 'win32' == platform and PY38: + # noinspection PyUnresolvedReferences + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + asyncio.set_event_loop(asyncio.new_event_loop()) + from tornado.platform.asyncio import AnyThreadEventLoopPolicy + asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) try: self.server = self.app.listen(self.options['port'], self.options['host'], ssl_options=ssl_options, From 32987134ba5dda4c778d14cd5a3eb1d40b8167bc Mon Sep 17 00:00:00 2001 From: JackDandy Date: Mon, 13 Feb 2023 21:00:11 +0000 Subject: [PATCH 05/21] Change codebase cleanups. Cleanup most init warnings. Cleanup some vars, pythonic instead of js. Some typos and python var/func names for Scheduler. Remove legacy handlers deprecated in 2020. Remove some legacy tagged stuff. Cleanup ConfigParser and 23.py Change cleanup vendored scandir. Remove redundant pkg_resources.py in favour of the vendor folder. Remove backports. Remove trakt checker. Change remove redundant WindowsSelectorEventLoopPolicy from webserveInit. Cleanup varnames and providers Various minor tidy ups to remove ide warnings. 
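
An illustrative sketch of the recurring cleanup applied across these files
(`some_items` and `is_wanted` below are hypothetical placeholders; only
`filter_list`/`map_list` are the real helpers from lib/_23.py):

    # before: py2-era helpers that forced list results on both interpreters
    from _23 import filter_list, map_list
    wanted = filter_list(lambda x: is_wanted(x), some_items)
    numbers = map_list(int, some_items)

    # after: plain py3 builtins, wrapped in list() only where a list is required
    wanted = list(filter(lambda x: is_wanted(x), some_items))
    numbers = list(map(int, some_items))

Wrapping the builtin in list() keeps the eager, indexable result the old
helpers provided; call sites that only iterate can drop the wrapper.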
--- CHANGES.md | 3 +- _cleaner.py | 3 + gui/slick/interfaces/default/cache.tmpl | 2 +- .../interfaces/default/config_providers.tmpl | 18 +- gui/slick/interfaces/default/displayShow.tmpl | 2 +- gui/slick/interfaces/default/editShow.tmpl | 2 +- gui/slick/interfaces/default/history.tmpl | 16 +- .../default/inc_addShowOptions.tmpl | 2 +- .../interfaces/default/inc_displayShow.tmpl | 6 +- .../default/inc_qualityChooser.tmpl | 2 +- gui/slick/interfaces/default/inc_top.tmpl | 2 +- .../default/manage_failedDownloads.tmpl | 2 +- .../interfaces/default/manage_massEdit.tmpl | 2 +- lib/_23.py | 48 +- lib/adba/__init__.py | 2 +- lib/backports/configparser/__init__.py | 1473 --------- lib/backports/configparser/helpers.py | 274 -- lib/backports/functools_lru_cache/__init__.py | 196 -- lib/backports/ssl_match_hostname/__init__.py | 204 -- lib/backports_abc.py | 216 -- lib/browser_ua.py | 13 +- lib/bs4_parser.py | 3 +- lib/exceptions_helper.py | 3 +- lib/pkg_resources.py | 2625 ----------------- lib/sg_helpers.py | 17 +- lib/subliminal/services/itasa.py | 48 +- sickgear.py | 11 +- sickgear/__init__.py | 104 +- sickgear/_legacy.py | 828 ------ sickgear/browser.py | 2 +- sickgear/classes.py | 6 +- sickgear/clients/download_station.py | 22 +- sickgear/clients/generic.py | 12 +- sickgear/clients/qbittorrent.py | 22 +- sickgear/clients/rtorrent.py | 2 +- sickgear/clients/transmission.py | 2 +- sickgear/clients/utorrent.py | 8 +- sickgear/common.py | 70 +- sickgear/config.py | 41 +- sickgear/databases/mainDB.py | 16 +- sickgear/event_queue.py | 2 +- sickgear/failedProcessor.py | 10 +- sickgear/failed_history.py | 6 +- sickgear/generic_queue.py | 2 +- sickgear/gh_api.py | 36 +- sickgear/helpers.py | 12 +- sickgear/history.py | 12 +- sickgear/image_cache.py | 4 +- sickgear/indexermapper.py | 4 +- sickgear/logger.py | 4 +- sickgear/metadata/__init__.py | 10 +- sickgear/metadata/generic.py | 16 +- sickgear/metadata/helpers.py | 2 +- sickgear/metadata/kodi.py | 10 +- sickgear/name_cache.py | 12 +- sickgear/name_parser/parser.py | 24 +- sickgear/name_parser/regexes.py | 2 +- sickgear/naming.py | 10 +- sickgear/network_timezones.py | 14 +- sickgear/nzbSplitter.py | 46 +- sickgear/people_queue.py | 4 +- sickgear/postProcessor.py | 18 +- sickgear/properFinder.py | 16 +- sickgear/providers/__init__.py | 107 +- sickgear/providers/btn.py | 2 +- sickgear/providers/generic.py | 36 +- sickgear/providers/newznab.py | 30 +- sickgear/providers/snowfl.py | 2 +- sickgear/sab.py | 2 +- sickgear/scene_exceptions.py | 4 +- sickgear/scene_numbering.py | 8 +- sickgear/scheduler.py | 32 +- sickgear/search.py | 52 +- sickgear/search_backlog.py | 30 +- sickgear/search_queue.py | 8 +- sickgear/sgdatetime.py | 5 +- sickgear/show_name_helpers.py | 8 +- sickgear/show_queue.py | 6 +- sickgear/show_updater.py | 2 +- sickgear/traktChecker.py | 222 -- sickgear/tv.py | 92 +- sickgear/tv_base.py | 4 +- sickgear/tvcache.py | 112 +- sickgear/ui.py | 26 +- sickgear/watchedstate.py | 1 + sickgear/watchedstate_queue.py | 2 +- sickgear/webapi.py | 40 +- sickgear/webserve.py | 288 +- sickgear/webserveInit.py | 25 +- tests/common_tests.py | 12 +- tests/helpers_tests.py | 2 +- tests/name_parser_tests.py | 12 +- tests/pp_tests.py | 4 +- tests/scene_helpers_tests.py | 8 +- tests/show_tests.py | 28 +- tests/snatch_tests.py | 2 +- tests/test_lib.py | 4 +- tests/webapi_tests.py | 26 +- 98 files changed, 895 insertions(+), 6955 deletions(-) delete mode 100644 lib/backports/configparser/__init__.py delete mode 100644 lib/backports/configparser/helpers.py delete 
mode 100644 lib/backports/functools_lru_cache/__init__.py delete mode 100644 lib/backports/ssl_match_hostname/__init__.py delete mode 100644 lib/backports_abc.py delete mode 100644 lib/pkg_resources.py delete mode 100644 sickgear/_legacy.py delete mode 100644 sickgear/traktChecker.py diff --git a/CHANGES.md b/CHANGES.md index 3a1e2240..ffb6f19b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,6 +3,7 @@ * Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) * Change remove calls to legacy py2 fix encoding function * Change requirements for pure py3 +* Change codebase cleanups ### 3.27.8 (2023-02-20 23:30:00 UTC) @@ -1080,7 +1081,7 @@ * Add API response field `global exclude require` to sg.listrequirewords endpoint * Change improve Popen resource usage under py2 * Add overall failure monitoring to History/Connect fails (renamed from "Provider fails") -* Change log exception during updateCache in newznab +* Change log exception during update_cache in newznab * Change make Py3.9 preparations * Change anime "Available groups" to display "No groups listed..." when API is fine with no results instead of blank * Change improve clarity of anime group lists by using terms Allow list and Block list diff --git a/_cleaner.py b/_cleaner.py index bfc31345..8c6eac00 100644 --- a/_cleaner.py +++ b/_cleaner.py @@ -37,6 +37,9 @@ if old_magic != magic_number: # skip cleaned005 as used during dev by testers cleanups = [ + ['.cleaned009.tmp', r'lib\scandir', [ + r'lib\scandir\__pycache__', r'lib\scandir', + ]], ['.cleaned008.tmp', r'lib\tornado_py3', [ r'lib\bs4_py2\builder\__pycache__', r'lib\bs4_py2\builder', r'lib\bs4_py2', r'lib\bs4_py3\builder\__pycache__', r'lib\bs4_py3\builder', r'lib\bs4_py3', diff --git a/gui/slick/interfaces/default/cache.tmpl b/gui/slick/interfaces/default/cache.tmpl index 7b115f67..be3ba876 100644 --- a/gui/slick/interfaces/default/cache.tmpl +++ b/gui/slick/interfaces/default/cache.tmpl @@ -65,7 +65,7 @@ #for $hItem in $cacheResults: - #set $provider = $providers.getProviderClass($hItem['provider']) + #set $provider = $providers.get_by_id($hItem['provider']) #set $tip = '%s @ %s' % ($hItem['provider'], $SGDatetime.sbfdatetime($SGDatetime.fromtimestamp($hItem['time']))) #set $ver = $hItem['version'] #set $ver = ($ver, '')[-1 == $ver] diff --git a/gui/slick/interfaces/default/config_providers.tmpl b/gui/slick/interfaces/default/config_providers.tmpl index 3c8342dc..ae0c1363 100644 --- a/gui/slick/interfaces/default/config_providers.tmpl +++ b/gui/slick/interfaces/default/config_providers.tmpl @@ -36,12 +36,12 @@ >>>> NOTE: Removed self.finish <<<<<----- - - If the ``status`` argument is specified, that value is used as the - HTTP status code; otherwise either 301 (permanent) or 302 - (temporary) is chosen based on the ``permanent`` argument. - The default is 302 (temporary). 
- """ - if not url.startswith(sickgear.WEB_ROOT): - url = sickgear.WEB_ROOT + url - - # noinspection PyUnresolvedReferences - if self._headers_written: - raise Exception('Cannot redirect after headers have been written') - if status is None: - status = 301 if permanent else 302 - else: - assert isinstance(status, int) - assert 300 <= status <= 399 - self.set_status(status) - self.set_header('Location', urljoin(utf8(self.request.uri), - utf8(url))) - - # todo: move to RouteHandler after removing _legacy module - def write_error(self, status_code, **kwargs): - body = '' - try: - if self.request.body: - body = '\nRequest body: %s' % decode_str(self.request.body) - except (BaseException, Exception): - pass - logger.log('Sent %s error response to a `%s` request for `%s` with headers:\n%s%s' % - (status_code, self.request.method, self.request.path, self.request.headers, body), logger.WARNING) - # suppress traceback by removing 'exc_info' kwarg - if 'exc_info' in kwargs: - logger.log('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"]), - logger.DEBUG) - del kwargs['exc_info'] - return super(LegacyBase, self).write_error(status_code, **kwargs) - - def data_received(self, *args): - pass - - -class LegacyBaseHandler(LegacyBase): - - def redirect_args(self, new_url, exclude=(None,), **kwargs): - args = '&'.join(['%s=%s' % (k, v) for (k, v) in - filter(lambda arg: arg[1] not in exclude, iteritems(kwargs))]) - self.redirect('%s%s' % (new_url, ('', '?' + args)[bool(args)]), permanent=True) - - """ deprecated from BaseHandler ------------------------------------------------------------------------------------ - """ - def getImage(self, *args, **kwargs): - return self.get_image(*args, **kwargs) - - def get_image(self, *args, **kwargs): - # abstract method - pass - - def showPoster(self, show=None, **kwargs): - # test: /showPoster/?show=73141&which=poster_thumb - return self.show_poster(TVidProdid(show)(), **kwargs) - - def show_poster(self, *args, **kwargs): - # abstract method - pass - - """ deprecated from MainHandler ------------------------------------------------------------------------------------ - """ - def episodeView(self, **kwargs): - self.redirect_args('/daily-schedule', exclude=(None, False), **kwargs) - - def setHomeLayout(self, *args, **kwargs): - return self.set_layout_view_shows(*args, **kwargs) - - def set_layout_view_shows(self, *args, **kwargs): - # abstract method - pass - - def setPosterSortBy(self, *args): - return self.set_poster_sortby(*args) - - @staticmethod - def set_poster_sortby(*args): - # abstract method - pass - - def setPosterSortDir(self, *args): - return self.set_poster_sortdir(*args) - - @staticmethod - def set_poster_sortdir(*args): - # abstract method - pass - - def setEpisodeViewLayout(self, *args): - return self.set_layout_daily_schedule(*args) - - def set_layout_daily_schedule(self, *args): - # abstract method - pass - - def toggleEpisodeViewDisplayPaused(self): - return self.toggle_display_paused_daily_schedule() - - # completely deprecated for the three way state set_ function - # def toggle_display_paused_daily_schedule(self): - # # abstract method - # pass - - def toggle_display_paused_daily_schedule(self): - - return self.set_display_paused_daily_schedule(not sickgear.EPISODE_VIEW_DISPLAY_PAUSED) - - def set_display_paused_daily_schedule(self, *args, **kwargs): - # abstract method - pass - - def setEpisodeViewCards(self, *args, **kwargs): - return self.set_cards_daily_schedule(*args, **kwargs) - - def 
set_cards_daily_schedule(self, *args, **kwargs): - # abstract method - pass - - def setEpisodeViewSort(self, *args, **kwargs): - return self.set_sort_daily_schedule(*args, **kwargs) - - def set_sort_daily_schedule(self, *args, **kwargs): - # abstract method - pass - - def getFooterTime(self, *args, **kwargs): - return self.get_footer_time(*args, **kwargs) - - @staticmethod - def get_footer_time(*args, **kwargs): - # abstract method - pass - - def toggleDisplayShowSpecials(self, **kwargs): - return self.toggle_specials_view_show(TVidProdid(kwargs.get('show'))()) - - def toggle_specials_view_show(self, *args): - # abstract method - pass - - def setHistoryLayout(self, *args): - return self.set_layout_history(*args) - - def set_layout_history(self, *args): - # abstract method - pass - - """ deprecated from Home ------------------------------------------------------------------------------------------- - """ - def showlistView(self): - self.redirect('/view-shows', permanent=True) - - def viewchanges(self): - self.redirect('/home/view-changes', permanent=True) - - def displayShow(self, **kwargs): - self.migrate_redir('view-show', **kwargs) - - def editShow(self, **kwargs): - kwargs['any_qualities'] = kwargs.pop('anyQualities', None) - kwargs['best_qualities'] = kwargs.pop('bestQualities', None) - kwargs['exceptions_list'] = kwargs.pop('exceptions_list', None) - kwargs['direct_call'] = kwargs.pop('directCall', False) - kwargs['tvinfo_lang'] = kwargs.pop('indexerLang', None) - kwargs['subs'] = kwargs.pop('subtitles', None) - self.migrate_redir('edit-show', **kwargs) - - def testRename(self, **kwargs): - self.migrate_redir('rename-media', **kwargs) - - def migrate_redir(self, new_url, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - self.redirect_args('/home/%s' % new_url, exclude=(None, False), **kwargs) - - def setStatus(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.set_show_status(**kwargs) - - def set_show_status(self, **kwargs): - # abstract method - pass - - def branchCheckout(self, *args): - return self.branch_checkout(*args) - - def branch_checkout(self, *args): - # abstract method - pass - - def pullRequestCheckout(self, *args): - return self.pull_request_checkout(*args) - - def pull_request_checkout(self, *args): - # abstract method - pass - - def display_season(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.season_render(**kwargs) - - def season_render(self, **kwargs): - # abstract method - pass - - def plotDetails(self, show, *args): - return self.plot_details(TVidProdid(show)(), *args) - - @staticmethod - def plot_details(*args): - # abstract method - pass - - def sceneExceptions(self, show): - return self.scene_exceptions(TVidProdid(show)()) - - @staticmethod - def scene_exceptions(*args): - # abstract method - pass - - def saveMapping(self, show, **kwargs): - kwargs['m_tvid'] = kwargs.pop('mindexer', 0) - kwargs['m_prodid'] = kwargs.pop('mindexerid', 0) - return self.save_mapping(TVidProdid(show)(), **kwargs) - - def save_mapping(self, *args, **kwargs): - # abstract method - pass - - def forceMapping(self, show, **kwargs): - return self.force_mapping(TVidProdid(show)(), **kwargs) - - @staticmethod - def force_mapping(*args, **kwargs): - # abstract method - pass - - def deleteShow(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.delete_show(**kwargs) - - def delete_show(self, *args, **kwargs): - # abstract method - pass 
- - def refreshShow(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.refresh_show(**kwargs) - - def refresh_show(self, *args, **kwargs): - # abstract method - pass - - def updateShow(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.update_show(**kwargs) - - def update_show(self, *args, **kwargs): - # abstract method - pass - - def subtitleShow(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.subtitle_show(**kwargs) - - def subtitle_show(self, *args, **kwargs): - # abstract method - pass - - def doRename(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.do_rename(**kwargs) - - def do_rename(self, *args, **kwargs): - # abstract method - pass - - def episode_search(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.search_episode(**kwargs) - - def search_episode(self, *args, **kwargs): - # abstract method - pass - - def searchEpisodeSubtitles(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.search_episode_subtitles(**kwargs) - - def search_episode_subtitles(self, *args, **kwargs): - # abstract method - pass - - def setSceneNumbering(self, **kwargs): - return self.set_scene_numbering( - tvid_prodid={kwargs.pop('indexer', ''): kwargs.pop('show', '')}, - for_season=kwargs.get('forSeason'), for_episode=kwargs.get('forEpisode'), - scene_season=kwargs.get('sceneSeason'), scene_episode=kwargs.get('sceneEpisode'), - scene_absolute=kwargs.get('sceneAbsolute')) - - @staticmethod - def set_scene_numbering(*args, **kwargs): - # abstract method - pass - - def update_emby(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.update_mb(**kwargs) - - def update_mb(self, *args, **kwargs): - # abstract method - pass - - def search_q_progress(self, **kwargs): - kwargs['tvid_prodid'] = TVidProdid(kwargs.pop('show', ''))() - return self.search_q_status(**kwargs) - - def search_q_status(self, *args, **kwargs): - # abstract method - pass - - """ deprecated from NewHomeAddShows i.e. 
HomeAddShows -------------------------------------------------------------- - """ - def addExistingShows(self, **kwargs): - kwargs['prompt_for_settings'] = kwargs.pop('promptForSettings', None) - self.redirect_args('/add-shows/add-existing-shows', **kwargs) - - def addAniDBShow(self, **kwargs): - self.migrate_redir_add_shows('info-anidb', TVINFO_TVDB, **kwargs) - - def addIMDbShow(self, **kwargs): - self.migrate_redir_add_shows('info-imdb', TVINFO_IMDB, **kwargs) - - def addTraktShow(self, **kwargs): - self.migrate_redir_add_shows('info-trakt', TVINFO_TVDB, **kwargs) - - def migrate_redir_add_shows(self, new_url, tvinfo, **kwargs): - prodid = kwargs.pop('indexer_id', None) - if prodid: - kwargs['ids'] = prodid - if TVINFO_TVDB == tvinfo and prodid: - kwargs['ids'] = TVidProdid({tvinfo: prodid})() - kwargs['show_name'] = kwargs.pop('showName', None) - self.redirect_args('/add-shows/%s' % new_url, **kwargs) - - def getIndexerLanguages(self): - return self.get_infosrc_languages() - - @staticmethod - def get_infosrc_languages(): - # abstract method - pass - - def searchIndexersForShowName(self, *args, **kwargs): - return self.search_tvinfo_for_showname(*args, **kwargs) - - def search_tvinfo_for_showname(self, *args, **kwargs): - # abstract method - pass - - def massAddTable(self, **kwargs): - return self.mass_add_table( - root_dir=kwargs.pop('rootDir', None), **kwargs) - - def mass_add_table(self, *args, **kwargs): - # abstract method - pass - - def addNewShow(self, **kwargs): - return self.add_new_show( - provided_tvid=kwargs.pop('providedIndexer', None), - which_series=kwargs.pop('whichSeries', None), - tvinfo_lang=kwargs.pop('indexerLang', 'en'), - root_dir=kwargs.pop('rootDir', None), - default_status=kwargs.pop('defaultStatus', None), - any_qualities=kwargs.pop('anyQualities', None), - best_qualities=kwargs.pop('bestQualities', None), - subs=kwargs.pop('subtitles', None), - full_show_path=kwargs.pop('fullShowPath', None), - skip_show=kwargs.pop('skipShow', None), - **kwargs) - - def add_new_show(self, *args, **kwargs): - # abstract method - pass - - """ deprecated from ConfigGeneral ---------------------------------------------------------------------------------- - """ - def generateKey(self): - return self.generate_key() - - @staticmethod - def generate_key(): - # abstract method - pass - - def saveRootDirs(self, **kwargs): - return self.save_root_dirs(root_dir_string=kwargs.get('rootDirString')) - - @staticmethod - def save_root_dirs(**kwargs): - # abstract method - pass - - def saveResultPrefs(self, **kwargs): - return self.save_result_prefs(**kwargs) - - @staticmethod - def save_result_prefs(**kwargs): - # abstract method - pass - - def saveAddShowDefaults(self, *args, **kwargs): - return self.save_add_show_defaults(*args, **kwargs) - - @staticmethod - def save_add_show_defaults(*args, **kwargs): - # abstract method - pass - - def saveGeneral(self, **kwargs): - return self.save_general(**kwargs) - - def save_general(self, **kwargs): - # abstract method - pass - - """ deprecated from ConfigSearch ----------------------------------------------------------------------------------- - """ - def saveSearch(self, **kwargs): - return self.save_search(**kwargs) - - def save_search(self, **kwargs): - # abstract method - pass - - """ deprecated from ConfigProviders -------------------------------------------------------------------------------- - """ - def canAddNewznabProvider(self, *args): - return self.can_add_newznab_provider(*args) - - @staticmethod - def can_add_newznab_provider(*args): - 
# abstract method - pass - - def getNewznabCategories(self, *args): - return self.get_newznab_categories(*args) - - @staticmethod - def get_newznab_categories(*args): - # abstract method - pass - - def canAddTorrentRssProvider(self, *args): - return self.can_add_torrent_rss_provider(*args) - - @staticmethod - def can_add_torrent_rss_provider(*args): - # abstract method - pass - - def checkProvidersPing(self): - return self.check_providers_ping() - - @staticmethod - def check_providers_ping(): - # abstract method - pass - - def saveProviders(self, *args, **kwargs): - return self.save_providers(*args, **kwargs) - - def save_providers(self, *args, **kwargs): - # abstract method - pass - - """ deprecated from ConfigPostProcessing --------------------------------------------------------------------------- - """ - def savePostProcessing(self, **kwargs): - return self.save_post_processing(**kwargs) - - def save_post_processing(self, **kwargs): - # abstract method - pass - - def testNaming(self, *args, **kwargs): - return self.test_naming(*args, **kwargs) - - @staticmethod - def test_naming(*args, **kwargs): - # abstract method - pass - - def isNamingValid(self, *args, **kwargs): - return self.is_naming_valid(*args, **kwargs) - - @staticmethod - def is_naming_valid(*args, **kwargs): - # abstract method - pass - - def isRarSupported(self): - return self.is_rar_supported() - - @staticmethod - def is_rar_supported(): - # abstract method - pass - - """ deprecated from ConfigSubtitles -------------------------------------------------------------------------------- - """ - def saveSubtitles(self, **kwargs): - return self.save_subtitles(**kwargs) - - def save_subtitles(self, **kwargs): - # abstract method - pass - - """ deprecated from ConfigAnime ------------------------------------------------------------------------------------ - """ - def saveAnime(self, **kwargs): - return self.save_anime(**kwargs) - - def save_anime(self, **kwargs): - # abstract method - pass - - """ deprecated from Manage ----------------------------------------------------------------------------------------- - """ - def episode_statuses(self, **kwargs): - self.redirect_args('/manage/episode-overview', **kwargs) - - def subtitleMissed(self, **kwargs): - kwargs['which_subs'] = kwargs.pop('whichSubs', None) - self.redirect_args('/manage/subtitle_missed', **kwargs) - - def show_episode_statuses(self, **kwargs): - return self.get_status_episodes(TVidProdid(kwargs.get('indexer_id'))(), kwargs.get('which_status')) - - @staticmethod - def get_status_episodes(*args): - # abstract method - pass - - def showSubtitleMissed(self, **kwargs): - return self.show_subtitle_missed(TVidProdid(kwargs.get('indexer_id'))(), kwargs.get('whichSubs')) - - @staticmethod - def show_subtitle_missed(*args): - # abstract method - pass - - def downloadSubtitleMissed(self, **kwargs): - return self.download_subtitle_missed(**kwargs) - - def download_subtitle_missed(self, **kwargs): - # abstract method - pass - - def backlogShow(self, **kwargs): - return self.backlog_show(TVidProdid(kwargs.get('indexer_id'))()) - - def backlog_show(self, *args): - # abstract method - pass - - def backlogOverview(self): - self.redirect('/manage/backlog_overview', permanent=True) - - def massEdit(self, **kwargs): - return self.mass_edit(to_edit=kwargs.get('toEdit')) - - def mass_edit(self, **kwargs): - # abstract method - pass - - def massEditSubmit(self, **kwargs): - kwargs['to_edit'] = kwargs.pop('toEdit', None) - kwargs['subs'] = kwargs.pop('subtitles', None) - 
kwargs['any_qualities'] = kwargs.pop('anyQualities', None) - kwargs['best_qualities'] = kwargs.pop('bestQualities', None) - return self.mass_edit_submit(**kwargs) - - def mass_edit_submit(self, **kwargs): - # abstract method - pass - - def bulkChange(self, **kwargs): - return self.bulk_change( - to_update=kwargs.get('toUpdate'), to_refresh=kwargs.get('toRefresh'), - to_rename=kwargs.get('toRename'), to_delete=kwargs.get('toDelete'), to_remove=kwargs.get('toRemove'), - to_metadata=kwargs.get('toMetadata'), to_subtitle=kwargs.get('toSubtitle')) - - def bulk_change(self, **kwargs): - # abstract method - pass - - def failedDownloads(self, **kwargs): - kwargs['to_remove'] = kwargs.pop('toRemove', None) - return self.failed_downloads(**kwargs) - - def failed_downloads(self, **kwargs): - # abstract method - pass - - """ deprecated from ManageSearches --------------------------------------------------------------------------------- - """ - def retryProvider(self, **kwargs): - return self.retry_provider(**kwargs) - - @staticmethod - def retry_provider(**kwargs): - # abstract method - pass - - def forceVersionCheck(self): - return self.check_update() - - def check_update(self): - # abstract method - pass - - def forceBacklog(self): - return self.force_backlog() - - def force_backlog(self): - # abstract method - pass - - def forceSearch(self): - return self.force_search() - - def force_search(self): - # abstract method - pass - - def forceFindPropers(self): - return self.force_find_propers() - - def force_find_propers(self): - # abstract method - pass - - def pauseBacklog(self, **kwargs): - return self.pause_backlog(**kwargs) - - def pause_backlog(self, **kwargs): - # abstract method - pass - - """ deprecated from ShowProcesses ---------------------------------------------------------------------------------- - """ - def forceShowUpdate(self): - return self.force_show_update() - - def force_show_update(self): - # abstract method - pass - - """ deprecated from History ---------------------------------------------------------------------------------------- - """ - def clearHistory(self): - return self.clear_history() - - def clear_history(self): - # abstract method - pass - - def trimHistory(self): - return self.trim_history() - - def trim_history(self): - # abstract method - pass - - """ deprecated from ErrorLogs -------------------------------------------------------------------------------------- - """ - def clearerrors(self): - self.redirect('/errors/clear-log') - - def viewlog(self, **kwargs): - self.redirect_args('/events/view-log/', **kwargs) - - def downloadlog(self): - return self.download_log() - - def download_log(self): - # abstract method - pass - - """ ------------------------------------------------------------------------------------------------------------ """ - """ ------------------------------------------------------------------------------------------------------------ """ - """ end of base deprecated function stubs """ - """ ------------------------------------------------------------------------------------------------------------ """ - """ ------------------------------------------------------------------------------------------------------------ """ - - -class LegacyRouteHandler(RequestHandler): - - def data_received(self, *args): - pass - - def __init__(self, *arg, **kwargs): - super(LegacyRouteHandler, self).__init__(*arg, **kwargs) - self.lock = threading.Lock() - - def set_default_headers(self): - self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, 
max-age=0') - self.set_header('X-Robots-Tag', 'noindex, nofollow, noarchive, nocache, noodp, noydir, noimageindex, nosnippet') - if sickgear.SEND_SECURITY_HEADERS: - self.set_header('X-Frame-Options', 'SAMEORIGIN') - - # noinspection PyUnusedLocal - @gen.coroutine - def get(self, *args, **kwargs): - getattr(self, 'index')() - - def redirect(self, url, permanent=False, status=None): - if not url.startswith(sickgear.WEB_ROOT): - url = sickgear.WEB_ROOT + url - - super(LegacyRouteHandler, self).redirect(url, permanent, status) - - -class LegacyManageManageSearches(LegacyRouteHandler): - - """ deprecated from ManageSearches --------------------------------------------------------------------------------- - """ - def index(self): - self.redirect('/manage/search-tasks/', permanent=True) - - -class LegacyManageShowProcesses(LegacyRouteHandler): - - """ deprecated from ManageShowProcesses ---------------------------------------------------------------------------- - """ - def index(self): - self.redirect('/manage/show-tasks/', permanent=True) - - -class LegacyConfigPostProcessing(LegacyRouteHandler): - - """ deprecated from ConfigPostProcessing --------------------------------------------------------------------------- - """ - def index(self): - self.redirect('/config/media-process/', permanent=True) - - -class LegacyHomeAddShows(LegacyRouteHandler): - - """ deprecated from NewHomeAddShows i.e. HomeAddShows -------------------------------------------------------------- - """ - def index(self): - self.redirect('/add-shows/', permanent=True) - - -class LegacyErrorLogs(LegacyRouteHandler): - - """ deprecated from ErrorLogs -------------------------------------------------------------------------------------- - """ - def index(self): - self.redirect('/events/', permanent=True) diff --git a/sickgear/browser.py b/sickgear/browser.py index 1c62b9e0..aa0e8589 100644 --- a/sickgear/browser.py +++ b/sickgear/browser.py @@ -46,7 +46,7 @@ def get_win_drives(): def folders_at_path(path, include_parent=False, include_files=False): """ Returns a list of dictionaries with the folders contained at the given path Give the empty string as the path to list the contents of the root path - under Unix this means "/", on Windows this will be a list of drive letters) + under Unix this means "/", (on Windows this will be a list of drive letters) """ # walk up the tree until we find a valid path diff --git a/sickgear/classes.py b/sickgear/classes.py index 5065e05a..a10360d0 100644 --- a/sickgear/classes.py +++ b/sickgear/classes.py @@ -155,7 +155,7 @@ class SearchResult(LegacySearchResult): class NZBSearchResult(SearchResult): """ - Regular NZB result with an URL to the NZB + Regular NZB result with a URL to the NZB """ resultType = 'nzb' @@ -169,7 +169,7 @@ class NZBDataSearchResult(SearchResult): class TorrentSearchResult(SearchResult): """ - Torrent result with an URL to the torrent + Torrent result with a URL to the torrent """ resultType = 'torrent' @@ -456,7 +456,7 @@ class SimpleNamespace(object): # list that supports weak reference -class weakList(list): +class WeakList(list): __slots__ = ('__weakref__',) diff --git a/sickgear/clients/download_station.py b/sickgear/clients/download_station.py index 62fc27ff..03f95f15 100644 --- a/sickgear/clients/download_station.py +++ b/sickgear/clients/download_station.py @@ -164,8 +164,8 @@ class DownloadStationAPI(GenericClient): # type: (Union[AnyStr, list]) -> Union[bool, list] """ Pause item(s) - :param ids: Id(s) to pause - :return: True/Falsy if success/failure else Id(s) 
that failed to be paused + :param ids: ID(s) to pause + :return: True/Falsy if success/failure else ID(s) that failed to be paused """ return self._action( 'pause', ids, @@ -177,8 +177,8 @@ class DownloadStationAPI(GenericClient): # type: (Union[AnyStr, list]) -> Union[bool, list] """ Resume task(s) in client - :param ids: Id(s) to act on - :return: True if success, Id(s) that could not be resumed, else Falsy if failure + :param ids: ID(s) to act on + :return: True if success, ID(s) that could not be resumed, else Falsy if failure """ return self._perform_task( 'resume', ids, @@ -190,8 +190,8 @@ class DownloadStationAPI(GenericClient): # type: (Union[AnyStr, list]) -> Union[bool, list] """ Delete task(s) from client - :param ids: Id(s) to act on - :return: True if success, Id(s) that could not be deleted, else Falsy if failure + :param ids: ID(s) to act on + :return: True if success, ID(s) that could not be deleted, else Falsy if failure """ return self._perform_task( 'delete', ids, @@ -205,10 +205,10 @@ class DownloadStationAPI(GenericClient): """ Set up and send a method to client :param method: Either `resume` or `delete` - :param ids: Id(s) to perform method on + :param ids: ID(s) to perform method on :param filter_func: Call back function to filter tasks as failed or erroneous :param pause_first: True if task should be paused prior to invoking method - :return: True if success, Id(s) that could not be acted upon, else Falsy if failure + :return: True if success, ID(s) that could not be acted upon, else Falsy if failure """ if isinstance(ids, (string_types, list)): rids = ids if isinstance(ids, list) else list(map(lambda x: x.strip(), ids.split(','))) @@ -256,7 +256,7 @@ class DownloadStationAPI(GenericClient): """ Add magnet to client (overridden class function) :param search_result: A populated search result object - :return: Id of task in client, True if added but no ID, else Falsy if nothing added + :return: ID of task in client, True if added but no ID, else Falsy if nothing added """ if 3 <= self._task_version: return self._add_torrent(uri={'uri': search_result.url}) @@ -269,7 +269,7 @@ class DownloadStationAPI(GenericClient): """ Add file to client (overridden class function) :param search_result: A populated search result object - :return: Id of task in client, True if added but no ID, else Falsy if nothing added + :return: ID of task in client, True if added but no ID, else Falsy if nothing added """ return self._add_torrent( files={'file': ('%s.torrent' % re.sub(r'(\.torrent)+$', '', search_result.name), search_result.content)}) @@ -280,7 +280,7 @@ class DownloadStationAPI(GenericClient): Create client task :param uri: URI param for client API :param files: file param for client API - :return: Id of task in client, True if created but no id found, else Falsy if nothing created + :return: ID of task in client, True if created but no id found, else Falsy if nothing created """ if self._testmode: # noinspection PyUnresolvedReferences diff --git a/sickgear/clients/generic.py b/sickgear/clients/generic.py index a9200e5b..143903e9 100644 --- a/sickgear/clients/generic.py +++ b/sickgear/clients/generic.py @@ -129,7 +129,7 @@ class GenericClient(object): def _add_torrent_file(self, result): """ This should be overridden to return the True/False from the client - when a torrent is added via result.content (only .torrent file) + when a torrent is added via `result.content` (only .torrent file) """ return False @@ -179,9 +179,9 @@ class GenericClient(object): """ This should be 
overridden to resume task(s) in client - :param ids: Id(s) to act on + :param ids: ID(s) to act on :type ids: list or string - :return: True if success, Id(s) that could not be resumed, else Falsy if failure + :return: True if success, ID(s) that could not be resumed, else Falsy if failure :rtype: bool or list """ return False @@ -189,9 +189,9 @@ class GenericClient(object): def _delete_torrent(self, ids): """ This should be overridden to delete task(s) from client - :param ids: Id(s) to act on + :param ids: ID(s) to act on :type ids: list or string - :return: True if success, Id(s) that could not be deleted, else Falsy if failure + :return: True if success, ID(s) that could not be deleted, else Falsy if failure :rtype: bool or list """ return False @@ -200,7 +200,7 @@ class GenericClient(object): def _get_torrent_hash(result): if result.url.startswith('magnet'): - result.hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0] + result.hash = re.findall(r'urn:btih:(\w{32,40})', result.url)[0] if 32 == len(result.hash): result.hash = make_btih(result.hash).lower() else: diff --git a/sickgear/clients/qbittorrent.py b/sickgear/clients/qbittorrent.py index 20b7690c..b9711e89 100644 --- a/sickgear/clients/qbittorrent.py +++ b/sickgear/clients/qbittorrent.py @@ -147,7 +147,7 @@ class QbittorrentAPI(GenericClient): """ Set maximal priority in queue to torrent task :param ids: ID(s) to promote - :return: True/Falsy if success/failure else Id(s) that failed to be changed + :return: True/Falsy if success/failure else ID(s) that failed to be changed """ def _maxpri_filter(t): mark_fail = True @@ -179,7 +179,7 @@ class QbittorrentAPI(GenericClient): """ Set label/category to torrent task :param ids: ID(s) to change - :return: True/Falsy if success/failure else Id(s) that failed to be changed + :return: True/Falsy if success/failure else ID(s) that failed to be changed """ def _label_filter(t): mark_fail = True @@ -205,8 +205,8 @@ class QbittorrentAPI(GenericClient): # type: (Union[AnyStr, list]) -> Union[bool, list] """ Pause item(s) - :param ids: Id(s) to pause - :return: True/Falsy if success/failure else Id(s) that failed to be paused + :param ids: ID(s) to pause + :return: True/Falsy if success/failure else ID(s) that failed to be paused """ def _pause_filter(t): mark_fail = True @@ -252,8 +252,8 @@ class QbittorrentAPI(GenericClient): # type: (Union[AnyStr, list]) -> Union[bool, list] """ Resume task(s) in client - :param ids: Id(s) to act on - :return: True if success, Id(s) that could not be resumed, else Falsy if failure + :param ids: ID(s) to act on + :return: True if success, ID(s) that could not be resumed, else Falsy if failure """ return self._perform_task( 'resume', ids, @@ -267,8 +267,8 @@ class QbittorrentAPI(GenericClient): # type: (Union[AnyStr, list]) -> Union[bool, list] """ Delete task(s) from client - :param ids: Id(s) to act on - :return: True if success, Id(s) that could not be deleted, else Falsy if failure + :param ids: ID(s) to act on + :return: True if success, ID(s) that could not be deleted, else Falsy if failure """ return self._perform_task( 'delete', ids, @@ -283,10 +283,10 @@ class QbittorrentAPI(GenericClient): """ Set up and send a method to client :param method: Either `resume` or `delete` - :param ids: Id(s) to perform method on + :param ids: ID(s) to perform method on :param filter_func: Call back function passed to _action that will filter tasks as failed or erroneous :param pause_first: True if task should be paused prior to invoking method - :return: True 
if success, Id(s) that could not be acted upon, else Falsy if failure + :return: True if success, ID(s) that could not be acted upon, else Falsy if failure """ if isinstance(ids, (string_types, list)): rids = ids if isinstance(ids, list) else list(map(lambda x: x.strip(), ids.split(','))) @@ -395,7 +395,7 @@ class QbittorrentAPI(GenericClient): """ Send a request to client :param cmd: Api task to invoke - :param kwargs: keyword arguments to pass thru to helpers getURL function + :param kwargs: keyword arguments to pass through to helpers getURL function :return: JSON decoded response dict, True if success and no response body, Text error or None if failure, """ authless = bool(re.search('(?i)login|version', cmd)) diff --git a/sickgear/clients/rtorrent.py b/sickgear/clients/rtorrent.py index b728c11f..332428cd 100644 --- a/sickgear/clients/rtorrent.py +++ b/sickgear/clients/rtorrent.py @@ -90,7 +90,7 @@ class RtorrentAPI(GenericClient): # try: # if ratio > 0: # - # # Explicitly set all group options to ensure it is setup correctly + # # Explicitly set all group options to ensure it is set up correctly # group.set_upload('1M') # group.set_min(ratio) # group.set_max(ratio) diff --git a/sickgear/clients/transmission.py b/sickgear/clients/transmission.py index 5c8074bb..8fb5810a 100644 --- a/sickgear/clients/transmission.py +++ b/sickgear/clients/transmission.py @@ -84,7 +84,7 @@ class TransmissionAPI(GenericClient): def _add_torrent(self, t_object): - # populate blankable and download_dir + # populate blanked and download_dir if not self._get_auth(): logger.log('%s: Authentication failed' % self.name, logger.ERROR) return False diff --git a/sickgear/clients/utorrent.py b/sickgear/clients/utorrent.py index 749a3efc..24d4a8e0 100644 --- a/sickgear/clients/utorrent.py +++ b/sickgear/clients/utorrent.py @@ -24,17 +24,17 @@ from _23 import urlencode from six import iteritems -class uTorrentAPI(GenericClient): +class UtorrentAPI(GenericClient): def __init__(self, host=None, username=None, password=None): - super(uTorrentAPI, self).__init__('uTorrent', host, username, password) + super(UtorrentAPI, self).__init__('uTorrent', host, username, password) self.url = self.host + 'gui/' def _request(self, method='get', params=None, files=None, **kwargs): params = {} if None is params else params - return super(uTorrentAPI, self)._request( + return super(UtorrentAPI, self)._request( method=method, params='token={0:s}&{1:s}'.format(self.auth, '&'.join( ['%s' % urlencode(dict([[key, str(value)]])) @@ -128,4 +128,4 @@ class uTorrentAPI(GenericClient): return self._request(params=params) -api = uTorrentAPI() +api = UtorrentAPI() diff --git a/sickgear/common.py b/sickgear/common.py index 9ad5f3ef..efdcc4e8 100644 --- a/sickgear/common.py +++ b/sickgear/common.py @@ -179,7 +179,7 @@ class Quality(object): return Quality.qualityStrings[quality].replace('SD DVD', 'SD DVD/BR/BD') @staticmethod - def _getStatusStrings(status): + def _get_status_strings(status): """ :param status: status @@ -187,14 +187,14 @@ class Quality(object): :return: :rtype: AnyStr """ - toReturn = {} + to_return = {} for _x in Quality.qualityStrings: - toReturn[Quality.compositeStatus(status, _x)] = '%s (%s)' % ( + to_return[Quality.composite_status(status, _x)] = '%s (%s)' % ( Quality.statusPrefixes[status], Quality.qualityStrings[_x]) - return toReturn + return to_return @staticmethod - def combineQualities(any_qualities, best_qualities): + def combine_qualities(any_qualities, best_qualities): # type: (List[int], List[int]) -> int """ @@ 
-210,7 +210,7 @@ class Quality(object): return any_quality | (best_quality << 16) @staticmethod - def splitQuality(quality): + def split_quality(quality): # type: (int) -> Tuple[List[int], List[int]] """ @@ -227,10 +227,10 @@ class Quality(object): return sorted(any_qualities), sorted(best_qualities) @staticmethod - def nameQuality(name, anime=False): + def name_quality(name, anime=False): """ Return The quality from an episode File renamed by SickGear - If no quality is achieved it will try sceneQuality regex + If no quality is achieved it will try scene_quality regex :param name: name :type name: AnyStr :param anime: is anmie @@ -247,7 +247,7 @@ class Quality(object): continue if Quality.NONE == _x: # Last chance - return Quality.sceneQuality(name, anime) + return Quality.scene_quality(name, anime) regex = r'\W' + Quality.qualityStrings[_x].replace(' ', r'\W') + r'\W' regex_match = re.search(regex, name, re.I) @@ -255,7 +255,7 @@ class Quality(object): return _x @staticmethod - def sceneQuality(name, anime=False): + def scene_quality(name, anime=False): """ Return The quality from the scene episode File :param name: name @@ -346,7 +346,7 @@ class Quality(object): return Quality.UNKNOWN @staticmethod - def fileQuality(filename): + def file_quality(filename): """ :param filename: filename @@ -405,7 +405,7 @@ class Quality(object): return Quality.UNKNOWN @staticmethod - def assumeQuality(name): + def assume_quality(name): """ :param name: name @@ -420,7 +420,7 @@ class Quality(object): return Quality.UNKNOWN @staticmethod - def compositeStatus(status, quality): + def composite_status(status, quality): """ :param status: status @@ -433,7 +433,7 @@ class Quality(object): return status + 100 * quality @staticmethod - def qualityDownloaded(status): + def quality_downloaded(status): # type: (int) -> int """ @@ -445,7 +445,7 @@ class Quality(object): return (status - DOWNLOADED) // 100 @staticmethod - def splitCompositeStatus(status): + def split_composite_status(status): # type: (int) -> Tuple[int, int] """Returns a tuple containing (status, quality) :param status: status @@ -460,7 +460,7 @@ class Quality(object): return status, Quality.NONE @staticmethod - def statusFromName(name, assume=True, anime=False): + def status_from_name(name, assume=True, anime=False): """ :param name: name @@ -472,13 +472,13 @@ class Quality(object): :return: :rtype: int or long """ - quality = Quality.nameQuality(name, anime) + quality = Quality.name_quality(name, anime) if assume and Quality.UNKNOWN == quality: - quality = Quality.assumeQuality(name) - return Quality.compositeStatus(DOWNLOADED, quality) + quality = Quality.assume_quality(name) + return Quality.composite_status(DOWNLOADED, quality) @staticmethod - def statusFromNameOrFile(file_path, assume=True, anime=False): + def status_from_name_or_file(file_path, assume=True, anime=False): """ :param file_path: file path @@ -490,12 +490,12 @@ class Quality(object): :return: :rtype: int or long """ - quality = Quality.nameQuality(file_path, anime) + quality = Quality.name_quality(file_path, anime) if Quality.UNKNOWN == quality: - quality = Quality.fileQuality(file_path) + quality = Quality.file_quality(file_path) if assume and Quality.UNKNOWN == quality: - quality = Quality.assumeQuality(file_path) - return Quality.compositeStatus(DOWNLOADED, quality) + quality = Quality.assume_quality(file_path) + return Quality.composite_status(DOWNLOADED, quality) SNATCHED = None SNATCHED_PROPER = None @@ -515,7 +515,7 @@ class WantedQualities(dict): super(WantedQualities, 
self).__init__(**kwargs) def _generate_wantedlist(self, qualities): - initial_qualities, upgrade_qualities = Quality.splitQuality(qualities) + initial_qualities, upgrade_qualities = Quality.split_quality(qualities) max_initial_quality = max(initial_qualities or [Quality.NONE]) min_upgrade_quality = min(upgrade_qualities or [1 << 16]) self[qualities] = {0: {self.bothlists: False, self.wantedlist: initial_qualities, self.upgradelist: False}} @@ -562,23 +562,23 @@ for (attr_name, qual_val) in [ ('SNATCHED', SNATCHED), ('SNATCHED_PROPER', SNATCHED_PROPER), ('SNATCHED_BEST', SNATCHED_BEST), ('DOWNLOADED', DOWNLOADED), ('ARCHIVED', ARCHIVED), ('FAILED', FAILED), ]: - setattr(Quality, attr_name, list(map(lambda qk: Quality.compositeStatus(qual_val, qk), + setattr(Quality, attr_name, list(map(lambda qk: Quality.composite_status(qual_val, qk), iterkeys(Quality.qualityStrings)))) Quality.SNATCHED_ANY = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST -SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], []) -HD = Quality.combineQualities( +SD = Quality.combine_qualities([Quality.SDTV, Quality.SDDVD], []) +HD = Quality.combine_qualities( [Quality.HDTV, Quality.FULLHDTV, Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.HDBLURAY, Quality.FULLHDBLURAY], []) # HD720p + HD1080p -HD720p = Quality.combineQualities([Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY], []) -HD1080p = Quality.combineQualities([Quality.FULLHDTV, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY], []) -UHD2160p = Quality.combineQualities([Quality.UHD4KWEB], []) -ANY = Quality.combineQualities( +HD720p = Quality.combine_qualities([Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY], []) +HD1080p = Quality.combine_qualities([Quality.FULLHDTV, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY], []) +UHD2160p = Quality.combine_qualities([Quality.UHD4KWEB], []) +ANY = Quality.combine_qualities( [Quality.SDTV, Quality.SDDVD, Quality.HDTV, Quality.FULLHDTV, Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.HDBLURAY, Quality.FULLHDBLURAY, Quality.UNKNOWN], []) # SD + HD # legacy template, can't remove due to reference in mainDB upgrade? 
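# A minimal illustrative sketch of what the renamed Quality helpers above encode, assuming
# only the arithmetic visible in these hunks: combine_qualities() ORs the "initial" quality
# flags into the low 16 bits and the "upgrade to" flags into the high 16 bits
# (any_quality | (best_quality << 16)), and composite_status() folds a quality flag into an
# episode status as status + 100 * quality. The standalone function names and the sample
# flag/status values below are hypothetical stand-ins for the demo, not SickGear API.

def combine_qualities_sketch(initial_flags, upgrade_flags):
    # pack two sets of single-bit quality flags into one integer preset
    any_quality = 0
    best_quality = 0
    for flag in initial_flags:
        any_quality |= flag
    for flag in upgrade_flags:
        best_quality |= flag
    return any_quality | (best_quality << 16)

def split_quality_sketch(preset):
    # reverse of the above: low 16 bits -> initial list, high 16 bits -> upgrade list
    initial_flags = [1 << n for n in range(16) if preset & (1 << n)]
    upgrade_flags = [1 << n for n in range(16) if (preset >> 16) & (1 << n)]
    return initial_flags, upgrade_flags

def composite_status_sketch(status, quality_flag):
    # the status keeps the two low decimal digits, the quality rides in the hundreds
    return status + 100 * quality_flag

def split_composite_status_sketch(composite):
    return composite % 100, composite // 100

if __name__ == '__main__':
    SDTV, HDTV = 1, 1 << 2          # hypothetical flag values for the demo only
    DOWNLOADED = 4                  # hypothetical status value for the demo only
    preset = combine_qualities_sketch([SDTV, HDTV], [HDTV])
    assert split_quality_sketch(preset) == ([SDTV, HDTV], [HDTV])
    assert split_composite_status_sketch(
        composite_status_sketch(DOWNLOADED, HDTV)) == (DOWNLOADED, HDTV)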
-BEST = Quality.combineQualities([Quality.SDTV, Quality.HDTV, Quality.HDWEBDL], [Quality.HDTV]) +BEST = Quality.combine_qualities([Quality.SDTV, Quality.HDTV, Quality.HDWEBDL], [Quality.HDTV]) qualityPresets = (SD, HD, HD720p, HD1080p, UHD2160p, ANY) @@ -607,7 +607,7 @@ class StatusStrings(object): def __getitem__(self, name): if name in Quality.SNATCHED_ANY + Quality.DOWNLOADED + Quality.ARCHIVED: - status, quality = Quality.splitCompositeStatus(name) + status, quality = Quality.split_composite_status(name) if quality == Quality.NONE: return self.statusStrings[status] return '%s (%s)' % (self.statusStrings[status], Quality.qualityStrings[quality]) @@ -703,7 +703,7 @@ class NeededQualities(object): """ from sickgear.tv import TVShow if isinstance(show_obj, TVShow): - init, upgrade = Quality.splitQuality(show_obj.quality) + init, upgrade = Quality.split_quality(show_obj.quality) all_qual = set(init + upgrade) need_sd = need_hd = need_uhd = need_webdl = False for wanted_qualities in all_qual: diff --git a/sickgear/config.py b/sickgear/config.py index 39a497c3..9fb7aa98 100644 --- a/sickgear/config.py +++ b/sickgear/config.py @@ -152,7 +152,7 @@ def schedule_mediaprocess(iv): if sickgear.MEDIAPROCESS_INTERVAL < sickgear.MIN_MEDIAPROCESS_INTERVAL: sickgear.MEDIAPROCESS_INTERVAL = sickgear.MIN_MEDIAPROCESS_INTERVAL - sickgear.media_process_scheduler.cycleTime = datetime.timedelta(minutes=sickgear.MEDIAPROCESS_INTERVAL) + sickgear.media_process_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.MEDIAPROCESS_INTERVAL) sickgear.media_process_scheduler.set_paused_state() @@ -162,14 +162,14 @@ def schedule_recentsearch(iv): if sickgear.RECENTSEARCH_INTERVAL < sickgear.MIN_RECENTSEARCH_INTERVAL: sickgear.RECENTSEARCH_INTERVAL = sickgear.MIN_RECENTSEARCH_INTERVAL - sickgear.recent_search_scheduler.cycleTime = datetime.timedelta(minutes=sickgear.RECENTSEARCH_INTERVAL) + sickgear.recent_search_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.RECENTSEARCH_INTERVAL) def schedule_backlog(iv): sickgear.BACKLOG_PERIOD = minimax(iv, sickgear.DEFAULT_BACKLOG_PERIOD, - sickgear.MIN_BACKLOG_PERIOD, sickgear.MAX_BACKLOG_PERIOD) + sickgear.MIN_BACKLOG_PERIOD, sickgear.MAX_BACKLOG_PERIOD) - sickgear.backlog_search_scheduler.action.cycleTime = sickgear.BACKLOG_PERIOD + sickgear.backlog_search_scheduler.action.cycle_time = sickgear.BACKLOG_PERIOD def schedule_update_software(iv): @@ -178,7 +178,7 @@ def schedule_update_software(iv): if sickgear.UPDATE_INTERVAL < sickgear.MIN_UPDATE_INTERVAL: sickgear.UPDATE_INTERVAL = sickgear.MIN_UPDATE_INTERVAL - sickgear.update_software_scheduler.cycleTime = datetime.timedelta(hours=sickgear.UPDATE_INTERVAL) + sickgear.update_software_scheduler.cycle_time = datetime.timedelta(hours=sickgear.UPDATE_INTERVAL) def schedule_update_software_notify(update_notify): @@ -195,10 +195,10 @@ def schedule_update_software_notify(update_notify): def schedule_update_packages(iv): sickgear.UPDATE_PACKAGES_INTERVAL = minimax(iv, sickgear.DEFAULT_UPDATE_PACKAGES_INTERVAL, - sickgear.MIN_UPDATE_PACKAGES_INTERVAL, - sickgear.MAX_UPDATE_PACKAGES_INTERVAL) + sickgear.MIN_UPDATE_PACKAGES_INTERVAL, + sickgear.MAX_UPDATE_PACKAGES_INTERVAL) - sickgear.update_packages_scheduler.cycleTime = datetime.timedelta(hours=sickgear.UPDATE_PACKAGES_INTERVAL) + sickgear.update_packages_scheduler.cycle_time = datetime.timedelta(hours=sickgear.UPDATE_PACKAGES_INTERVAL) def schedule_update_packages_notify(update_packages_notify): @@ -228,15 +228,6 @@ def schedule_trakt(use_trakt): return 
sickgear.USE_TRAKT = use_trakt - # if sickgear.USE_TRAKT: - # sickgear.trakt_checker_scheduler.start() - # else: - # sickgear.trakt_checker_scheduler.stop() - # logger.log(u'Waiting for the TRAKTCHECKER thread to exit') - # try: - # sickgear.trakt_checker_scheduler.join(10) - # except: - # pass def schedule_subtitles(use_subtitles): @@ -250,7 +241,7 @@ def schedule_emby_watched(emby_watched_interval): 0, sickgear.MAX_WATCHEDSTATE_INTERVAL) if emby_watched_iv and emby_watched_iv != sickgear.EMBY_WATCHEDSTATE_INTERVAL: sickgear.EMBY_WATCHEDSTATE_INTERVAL = emby_watched_iv - sickgear.emby_watched_state_scheduler.cycleTime = datetime.timedelta(minutes=emby_watched_iv) + sickgear.emby_watched_state_scheduler.cycle_time = datetime.timedelta(minutes=emby_watched_iv) sickgear.EMBY_WATCHEDSTATE_SCHEDULED = bool(emby_watched_iv) sickgear.emby_watched_state_scheduler.set_paused_state() @@ -261,7 +252,7 @@ def schedule_plex_watched(plex_watched_interval): 0, sickgear.MAX_WATCHEDSTATE_INTERVAL) if plex_watched_iv and plex_watched_iv != sickgear.PLEX_WATCHEDSTATE_INTERVAL: sickgear.PLEX_WATCHEDSTATE_INTERVAL = plex_watched_iv - sickgear.plex_watched_state_scheduler.cycleTime = datetime.timedelta(minutes=plex_watched_iv) + sickgear.plex_watched_state_scheduler.cycle_time = datetime.timedelta(minutes=plex_watched_iv) sickgear.PLEX_WATCHEDSTATE_SCHEDULED = bool(plex_watched_iv) sickgear.plex_watched_state_scheduler.set_paused_state() @@ -345,7 +336,7 @@ def clean_hosts(hosts, default_port=None, allow_base=False): def clean_url(url, add_slash=True): - """ Returns an cleaned url starting with a scheme and folder with trailing '/' or an empty string """ + """ Returns a cleaned url starting with a scheme and folder with trailing '/' or an empty string """ if url and url.strip(): @@ -437,7 +428,7 @@ def check_setting_float(config, cfg_name, item_name, def_val): def check_setting_str(config, cfg_name, item_name, def_val, log=True): """ - For passwords you must include the word `password` in the item_name and + For passwords, you must include the word `password` in the item_name and add `helpers.encrypt(ITEM_NAME, ENCRYPTION_VERSION)` in save_config() """ @@ -662,7 +653,7 @@ class ConfigMigrator(object): Reads in the old naming settings from your config and generates a new config template from them. 
""" # get the old settings from the file and store them in the new variable names - for prov in [curProvider for curProvider in sickgear.providers.sortedProviderList() + for prov in [curProvider for curProvider in sickgear.providers.sorted_sources() if 'omgwtfnzbs' == curProvider.name]: prov.username = check_setting_str(self.config_obj, 'omgwtfnzbs', 'omgwtfnzbs_uid', '') prov.api_key = check_setting_str(self.config_obj, 'omgwtfnzbs', 'omgwtfnzbs_key', '') @@ -773,13 +764,13 @@ class ConfigMigrator(object): # Migration v6: Rename daily search to recent search def _migrate_v6(self): sickgear.RECENTSEARCH_INTERVAL = check_setting_int(self.config_obj, 'General', 'dailysearch_frequency', - sickgear.DEFAULT_RECENTSEARCH_INTERVAL) + sickgear.DEFAULT_RECENTSEARCH_INTERVAL) sickgear.RECENTSEARCH_STARTUP = bool(check_setting_int(self.config_obj, 'General', 'dailysearch_startup', 1)) if sickgear.RECENTSEARCH_INTERVAL < sickgear.MIN_RECENTSEARCH_INTERVAL: sickgear.RECENTSEARCH_INTERVAL = sickgear.MIN_RECENTSEARCH_INTERVAL - for curProvider in sickgear.providers.sortedProviderList(): + for curProvider in sickgear.providers.sorted_sources(): if hasattr(curProvider, 'enable_recentsearch'): curProvider.enable_recentsearch = bool(check_setting_int( self.config_obj, curProvider.get_id().upper(), curProvider.get_id() + '_enable_dailysearch', 1)) @@ -831,7 +822,7 @@ class ConfigMigrator(object): # Migration v15: Transmithe.net variables def _migrate_v15(self): try: - neb = list(filter(lambda p: 'Nebulance' in p.name, sickgear.providers.sortedProviderList()))[0] + neb = list(filter(lambda p: 'Nebulance' in p.name, sickgear.providers.sorted_sources()))[0] except (BaseException, Exception): return # get the old settings from the file and store them in the new variable names diff --git a/sickgear/databases/mainDB.py b/sickgear/databases/mainDB.py index be2edcf1..249751c4 100644 --- a/sickgear/databases/mainDB.py +++ b/sickgear/databases/mainDB.py @@ -474,7 +474,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): continue # get the status/quality of the existing ep and make sure it's what we expect - ep_status, ep_quality = common.Quality.splitCompositeStatus(int(sql_result[0]['status'])) + ep_status, ep_quality = common.Quality.split_composite_status(int(sql_result[0]['status'])) if ep_status != common.DOWNLOADED: continue @@ -581,8 +581,8 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): """ def _update_status(self, old_status): - (status, quality) = common.Quality.splitCompositeStatus(old_status) - return common.Quality.compositeStatus(status, self._update_quality(quality)) + (status, quality) = common.Quality.split_composite_status(old_status) + return common.Quality.composite_status(status, self._update_quality(quality)) @staticmethod def _update_quality(old_quality): @@ -635,17 +635,17 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): sickgear.save_config() # upgrade previous HD to HD720p -- shift previous qualities to new placevalues - old_hd = common.Quality.combineQualities( + old_hd = common.Quality.combine_qualities( [common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3], []) - new_hd = common.Quality.combineQualities([common.Quality.HDTV, common.Quality.HDWEBDL, - common.Quality.HDBLURAY], []) + new_hd = common.Quality.combine_qualities([common.Quality.HDTV, common.Quality.HDWEBDL, + common.Quality.HDBLURAY], []) # update ANY -- shift existing qualities and add new 1080p qualities, # note that rawHD was not added to the ANY template - old_any = 
common.Quality.combineQualities( + old_any = common.Quality.combine_qualities( [common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3, common.Quality.UNKNOWN], []) - new_any = common.Quality.combineQualities( + new_any = common.Quality.combine_qualities( [common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.FULLHDTV, common.Quality.HDWEBDL, common.Quality.FULLHDWEBDL, common.Quality.HDBLURAY, common.Quality.FULLHDBLURAY, common.Quality.UNKNOWN], []) diff --git a/sickgear/event_queue.py b/sickgear/event_queue.py index d9a42daa..2975c380 100644 --- a/sickgear/event_queue.py +++ b/sickgear/event_queue.py @@ -33,7 +33,7 @@ class Events(threading.Thread): # get event type etype = self.queue.get(True, 1) - # perform callback if we got a event type + # perform callback if we got an event type self.callback(etype) # event completed diff --git a/sickgear/failedProcessor.py b/sickgear/failedProcessor.py index b1c7b4d8..e1e6a40b 100644 --- a/sickgear/failedProcessor.py +++ b/sickgear/failedProcessor.py @@ -69,19 +69,19 @@ class FailedProcessor(LegacyFailedProcessor): """ self._log(u'Failed download detected: (%s, %s)' % (self.nzb_name, self.dir_name)) - releaseName = show_name_helpers.determine_release_name(self.dir_name, self.nzb_name) - if None is releaseName: + release_name = show_name_helpers.determine_release_name(self.dir_name, self.nzb_name) + if None is release_name: self._log(u'Warning: unable to find a valid release name.', logger.WARNING) raise exceptions_helper.FailedProcessingFailed() try: parser = NameParser(False, show_obj=self.show_obj, convert=True) - parsed = parser.parse(releaseName) + parsed = parser.parse(release_name) except InvalidNameException: - self._log(u'Error: release name is invalid: ' + releaseName, logger.DEBUG) + self._log(u'Error: release name is invalid: ' + release_name, logger.DEBUG) raise exceptions_helper.FailedProcessingFailed() except InvalidShowException: - self._log(u'Error: unable to parse release name %s into a valid show' % releaseName, logger.DEBUG) + self._log(u'Error: unable to parse release name %s into a valid show' % release_name, logger.DEBUG) raise exceptions_helper.FailedProcessingFailed() logger.log(u"name_parser info: ", logger.DEBUG) diff --git a/sickgear/failed_history.py b/sickgear/failed_history.py index 0989c0ed..8edc87fe 100644 --- a/sickgear/failed_history.py +++ b/sickgear/failed_history.py @@ -160,8 +160,8 @@ def set_episode_failed(ep_obj): """ try: with ep_obj.lock: - quality = Quality.splitCompositeStatus(ep_obj.status)[1] - ep_obj.status = Quality.compositeStatus(FAILED, quality) + quality = Quality.split_composite_status(ep_obj.status)[1] + ep_obj.status = Quality.composite_status(FAILED, quality) ep_obj.save_to_db() except EpisodeNotFoundException as e: @@ -231,7 +231,7 @@ def revert_episode(ep_obj): if ep_obj.episode in history_eps: status_revert = history_eps[ep_obj.episode]['old_status'] - status, quality = Quality.splitCompositeStatus(status_revert) + status, quality = Quality.split_composite_status(status_revert) logger.log('Found in failed.db history with status: %s quality: %s' % ( statusStrings[status], Quality.qualityStrings[quality])) else: diff --git a/sickgear/generic_queue.py b/sickgear/generic_queue.py index b29d0ebc..d57ebcbe 100644 --- a/sickgear/generic_queue.py +++ b/sickgear/generic_queue.py @@ -175,7 +175,7 @@ class GenericQueue(object): """ clear queue excluding internal defined types - :param action_types: only clear 
all of given action type + :param action_types: only clear supplied action types """ if not isinstance(action_types, list): action_types = [action_types] diff --git a/sickgear/gh_api.py b/sickgear/gh_api.py index bca5e589..7c78c4ed 100644 --- a/sickgear/gh_api.py +++ b/sickgear/gh_api.py @@ -23,7 +23,7 @@ if False: class GitHub(object): """ - Simple api wrapper for the Github API v3. Currently only supports the small thing that SB + Simple api wrapper for the GitHub API v3. Currently only supports the small thing that SB needs it for - list of commits. """ @@ -34,7 +34,7 @@ class GitHub(object): self.branch = branch @staticmethod - def _access_API(path, params=None): + def _access_api(path, params=None): """ Access the API at the path given and with the optional params given. @@ -49,55 +49,57 @@ class GitHub(object): if params and type(params) is dict: url += '?' + '&'.join([str(x) + '=' + str(params[x]) for x in params]) - parsedJSON = helpers.get_url(url, parse_json=True) - if not parsedJSON: + parsed_json = helpers.get_url(url, parse_json=True) + if not parsed_json: return [] - return parsedJSON + return parsed_json def commits(self): """ Get a list of the 100 most recent commits from the specified user/repo/branch, starting from HEAD. - user: The github username of the person whose repo you're querying + user: The GitHub username of the person whose repo you're querying repo: The repo name to query branch: Optional, the branch name to show commits from - Returns a deserialized json object containing the commit info. See http://developer.github.com/v3/repos/commits/ + Returns a deserialized json object containing the commit info. + See https://developer.github.com/v3/repos/commits/ """ - access_API = self._access_API(['repos', self.github_repo_user, self.github_repo, 'commits'], + access_api = self._access_api(['repos', self.github_repo_user, self.github_repo, 'commits'], params={'per_page': 100, 'sha': self.branch}) - return access_API + return access_api def compare(self, base, head, per_page=1): """ Uses the API to get a list of compares between base and head. - user: The github username of the person whose repo you're querying + user: The GitHub username of the person whose repo you're querying repo: The repo name to query base: Start compare from branch head: Current commit sha or branch name to compare per_page: number of items per page - Returns a deserialized json object containing the compare info. See http://developer.github.com/v3/repos/commits + Returns a deserialized json object containing the compare info. + See https://developer.github.com/v3/repos/commits """ - access_API = self._access_API( + access_api = self._access_api( ['repos', self.github_repo_user, self.github_repo, 'compare', base + '...' 
+ head], params={'per_page': per_page}) - return access_API + return access_api def branches(self): - access_API = self._access_API( + access_api = self._access_api( ['repos', self.github_repo_user, self.github_repo, 'branches'], params={'per_page': 100}) - return access_API + return access_api def pull_requests(self): - access_API = self._access_API( + access_api = self._access_api( ['repos', self.github_repo_user, self.github_repo, 'pulls'], params={'per_page': 100}) # type: Optional[Dict] pulls = [] - for x in access_API: + for x in access_api: try: pull = PullRequest(x['head']['ref'], x['number']) pulls.append((repr(pull), pull.fetch_name())) diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 9a8b9db6..3fc8c499 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -63,7 +63,7 @@ if False: from typing import Any, AnyStr, Dict, Generator, NoReturn, Iterable, Iterator, List, Optional, Set, Tuple, Union from .tv import TVShow # the following workaround hack resolves a pyc resolution bug - from .name_cache import retrieveNameFromCache + from .name_cache import retrieve_name_from_cache from six import integer_types RE_XML_ENCODING = re.compile(r'^(<\?xml[^>]+)\s+(encoding\s*=\s*[\"\'][^\"\']*[\"\'])(\s*\?>|)', re.U) @@ -954,7 +954,7 @@ def get_show(name, try_scene_exceptions=False): show_obj = None try: - tvid, prodid = sickgear.name_cache.retrieveNameFromCache(name) + tvid, prodid = sickgear.name_cache.retrieve_name_from_cache(name) if tvid and prodid: show_obj = find_show_by_id({tvid: prodid}) @@ -1284,7 +1284,7 @@ def check_port(host, port, timeout=1.0): def clear_unused_providers(): - providers = [x.cache.providerID for x in sickgear.providers.sortedProviderList() if x.is_active()] + providers = [x.cache.providerID for x in sickgear.providers.sorted_sources() if x.is_active()] if providers: my_db = db.DBConnection('cache.db') @@ -1391,7 +1391,7 @@ def should_delete_episode(status): :return: should be deleted :rtype: bool """ - s = Quality.splitCompositeStatus(status)[0] + s = Quality.split_composite_status(status)[0] if s not in SNATCHED_ANY + [DOWNLOADED, ARCHIVED, IGNORED]: return True logger.log('not safe to delete episode from db because of status: %s' % statusStrings[s], logger.DEBUG) @@ -1515,7 +1515,7 @@ def get_overview(ep_status, show_quality, upgrade_once, split_snatch=False): :type split_snatch: bool :return: constant from classes Overview """ - status, quality = Quality.splitCompositeStatus(ep_status) + status, quality = Quality.split_composite_status(ep_status) if ARCHIVED == status: return Overview.GOOD if WANTED == status: @@ -1531,7 +1531,7 @@ def get_overview(ep_status, show_quality, upgrade_once, split_snatch=False): if not split_snatch and status in SNATCHED_ANY: return Overview.SNATCHED - void, best_qualities = Quality.splitQuality(show_quality) + void, best_qualities = Quality.split_quality(show_quality) # if re-downloads aren't wanted then mark it "good" if there is anything if not len(best_qualities): return Overview.GOOD diff --git a/sickgear/history.py b/sickgear/history.py index 49d2f68e..209b3e9a 100644 --- a/sickgear/history.py +++ b/sickgear/history.py @@ -72,7 +72,7 @@ def log_snatch(search_result): else: provider = 'unknown' - action = Quality.compositeStatus((SNATCHED, SNATCHED_PROPER)[is_proper], search_result.quality) + action = Quality.composite_status((SNATCHED, SNATCHED_PROPER)[is_proper], search_result.quality) resource = search_result.name @@ -120,8 +120,8 @@ def log_subtitle(tvid, prodid, season, episode, status, subtitle_result): 
""" resource = subtitle_result.path provider = subtitle_result.service - status, quality = Quality.splitCompositeStatus(status) - action = Quality.compositeStatus(SUBTITLED, quality) + status, quality = Quality.split_composite_status(status) + action = Quality.composite_status(SUBTITLED, quality) _log_history_item(action, tvid, prodid, season, episode, quality, resource, provider) @@ -135,8 +135,8 @@ def log_failed(ep_obj, release, provider=None): :param release: release :param provider: provider name """ - status, quality = Quality.splitCompositeStatus(ep_obj.status) - action = Quality.compositeStatus(FAILED, quality) + status, quality = Quality.split_composite_status(ep_obj.status) + action = Quality.composite_status(FAILED, quality) _log_history_item(action, ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season, ep_obj.episode, quality, release, provider) @@ -210,7 +210,7 @@ def history_snatched_proper_fix(): continue if 0 < Quality.get_proper_level(pr.extra_info_no_name(), pr.version, pr.is_anime): cl.append(['UPDATE history SET action = ? WHERE rowid = ?', - [Quality.compositeStatus(SNATCHED_PROPER, int(r['quality'])), + [Quality.composite_status(SNATCHED_PROPER, int(r['quality'])), r['rowid']]]) if cl: my_db.mass_action(cl) diff --git a/sickgear/image_cache.py b/sickgear/image_cache.py index 8648bd22..f0372358 100644 --- a/sickgear/image_cache.py +++ b/sickgear/image_cache.py @@ -271,7 +271,7 @@ class ImageCache(object): """ :param image_file: image file :type image_file: AnyStr - :return: true if a image_file exists + :return: true if an image_file exists :rtype: bool """ result = [] @@ -652,7 +652,7 @@ class ImageCache(object): if thumb_img_data: thumb_result = metadata_generator.write_image(thumb_img_data, dest_thumb_path, force=True) if not thumb_result: - thumb_result = metadata_generator.write_image(img_data, dest_thumb_path, force=True) + metadata_generator.write_image(img_data, dest_thumb_path, force=True) break if result: diff --git a/sickgear/indexermapper.py b/sickgear/indexermapper.py index 26e70480..38d0f022 100644 --- a/sickgear/indexermapper.py +++ b/sickgear/indexermapper.py @@ -132,7 +132,7 @@ def confirm_show(premiere_date, shows_premiere, expected_name, show_name): # type: (Optional[datetime.date], Optional[Union[AnyStr, datetime.date]], AnyStr, AnyStr) -> bool """ confirm show possible confirmations: - 1. premiere dates are less then 2 days apart + 1. premiere dates are less than 2 days apart 2. 
show name is the same and premiere year is 1 year or less apart :param premiere_date: expected show premiere date @@ -252,7 +252,7 @@ def map_indexers_to_show(show_obj, update=False, force=False, recheck=False, im_ all_ids_srcs = [src_tv_id] + [s for s in (TVINFO_TRAKT, TVINFO_TMDB, TVINFO_TVMAZE, TVINFO_TVDB, TVINFO_IMDB) if s != src_tv_id] searched, confirmed = {}, False - for r in moves.range(len(all_ids_srcs)): + for _ in moves.range(len(all_ids_srcs)): search_done = False for i in all_ids_srcs: if new_ids.verified.get(i): diff --git a/sickgear/logger.py b/sickgear/logger.py index 39821266..2e479737 100644 --- a/sickgear/logger.py +++ b/sickgear/logger.py @@ -263,8 +263,8 @@ class SBRotatingLogHandler(object): buf = fh.read(min(remaining_size, buf_size)) remaining_size -= buf_size lines = buf.split('\n') - # the first line of the buffer is probably not a complete line so - # we'll save it and append it to the last line of the next buffer + # the first line of the buffer is probably not a complete line, + # so save it and append it to the last line of the next buffer # we read if None is not segment: # if the previous chunk starts right from the beginning of line diff --git a/sickgear/metadata/__init__.py b/sickgear/metadata/__init__.py index 95fbcf48..1dbf73ad 100644 --- a/sickgear/metadata/__init__.py +++ b/sickgear/metadata/__init__.py @@ -25,7 +25,7 @@ def available_generators(): return list(filter(lambda x: x not in ('generic', 'helpers'), __all__)) -def _getMetadataModule(name): +def _get_metadata_module(name): name = name.lower() prefix = "sickgear.metadata." if name in __all__ and prefix + name in sys.modules: @@ -33,8 +33,8 @@ def _getMetadataModule(name): return None -def _getMetadataClass(name): - module = _getMetadataModule(name) +def _get_metadata_class(name): + module = _get_metadata_module(name) if not module: return None @@ -45,10 +45,10 @@ def _getMetadataClass(name): def get_metadata_generator_dict(): result = {} for cur_generator_id in available_generators(): - cur_generator = _getMetadataClass(cur_generator_id) + cur_generator = _get_metadata_class(cur_generator_id) if not cur_generator: continue result[cur_generator.name] = cur_generator return result - + diff --git a/sickgear/metadata/generic.py b/sickgear/metadata/generic.py index d80022b3..62b122dc 100644 --- a/sickgear/metadata/generic.py +++ b/sickgear/metadata/generic.py @@ -613,7 +613,7 @@ class GenericMetadata(object): logger.log(u"No thumb is available for this episode, not creating a thumb", logger.DEBUG) return False - thumb_data = metadata_helpers.getShowImage(thumb_url, show_name=ep_obj.show_obj.name) + thumb_data = metadata_helpers.get_show_image(thumb_url, show_name=ep_obj.show_obj.name) result = self._write_image(thumb_data, file_path) @@ -711,7 +711,7 @@ class GenericMetadata(object): if 0 == len(cur_season_art): continue - # Just grab whatever's there for now + # Just grab whatever is there for now art_id, season_url = cur_season_art.popitem() season_poster_file_path = self.get_season_poster_path(show_obj, cur_season) @@ -721,7 +721,7 @@ class GenericMetadata(object): logger.DEBUG) continue - season_data = metadata_helpers.getShowImage(season_url, show_name=show_obj.name) + season_data = metadata_helpers.get_show_image(season_url, show_name=show_obj.name) if not season_data: logger.log(u'No season poster data available, skipping this season', logger.DEBUG) @@ -756,7 +756,7 @@ class GenericMetadata(object): if 0 == len(cur_season_art): continue - # Just grab whatever's there for now + # Just grab 
whatever is there for now art_id, season_url = cur_season_art.popitem() season_banner_file_path = self.get_season_banner_path(show_obj, cur_season) @@ -766,7 +766,7 @@ class GenericMetadata(object): logger.DEBUG) continue - season_data = metadata_helpers.getShowImage(season_url, show_name=show_obj.name) + season_data = metadata_helpers.get_show_image(season_url, show_name=show_obj.name) if not season_data: logger.log(u'No season banner data available, skipping this season', logger.DEBUG) @@ -854,7 +854,7 @@ class GenericMetadata(object): def _get_show_info(tv_id): try: show_lang = show_obj.lang - # There's gotta be a better way of doing this but we don't wanna + # There's gotta be a better way of doing this, but we don't want to # change the language value elsewhere tvinfo_config = sickgear.TVInfoAPI(tv_id).api_params.copy() tvinfo_config['fanart'] = True @@ -1058,7 +1058,7 @@ class GenericMetadata(object): if image_type in ('poster', 'banner'): if isinstance(image_url, tuple): image_url = image_url[0] - img_data = metadata_helpers.getShowImage(image_url, which, show_obj.name) + img_data = metadata_helpers.get_show_image(image_url, which, show_obj.name) if img_cache_type and img_cache_type != image_cache.which_type(img_data, is_binary=True): img_data = None continue @@ -1082,7 +1082,7 @@ class GenericMetadata(object): result = {} try: - # There's gotta be a better way of doing this but we don't wanna + # There's gotta be a better way of doing this, but we don't want to # change the language value elsewhere tvinfo_config = sickgear.TVInfoAPI(show_obj.tvid).api_params.copy() tvinfo_config[image_type] = True diff --git a/sickgear/metadata/helpers.py b/sickgear/metadata/helpers.py index fe046379..f0f5254a 100644 --- a/sickgear/metadata/helpers.py +++ b/sickgear/metadata/helpers.py @@ -22,7 +22,7 @@ if False: from typing import AnyStr, Optional -def getShowImage(url, img_num=None, show_name=None, supress_log=False): +def get_show_image(url, img_num=None, show_name=None, supress_log=False): # type: (AnyStr, Optional[int], Optional[AnyStr], bool) -> Optional[bytes] """ diff --git a/sickgear/metadata/kodi.py b/sickgear/metadata/kodi.py index 8472f30c..9723d940 100644 --- a/sickgear/metadata/kodi.py +++ b/sickgear/metadata/kodi.py @@ -107,7 +107,7 @@ class KODIMetadata(generic.GenericMetadata): show_obj: a TVShow instance to create the NFO for """ - show_ID = show_obj.prodid + show_id = show_obj.prodid show_lang = show_obj.lang tvinfo_config = sickgear.TVInfoAPI(show_obj.tvid).api_params.copy() @@ -125,9 +125,9 @@ class KODIMetadata(generic.GenericMetadata): tv_node = etree.Element('tvshow') try: - show_info = t[int(show_ID)] + show_info = t[int(show_id)] except BaseTVinfoShownotfound as e: - logger.log('Unable to find show with id %s on %s, skipping it' % (show_ID, sickgear.TVInfoAPI( + logger.log('Unable to find show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI( show_obj.tvid).name), logger.ERROR) raise e except BaseTVinfoError as e: @@ -141,7 +141,7 @@ class KODIMetadata(generic.GenericMetadata): # check for title and id if None is getattr(show_info, 'seriesname', None) or None is getattr(show_info, 'id', None): - logger.log('Incomplete info for show with id %s on %s, skipping it' % (show_ID, sickgear.TVInfoAPI( + logger.log('Incomplete info for show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI( show_obj.tvid).name), logger.ERROR) return False @@ -171,7 +171,7 @@ class KODIMetadata(generic.GenericMetadata): uniqueid = etree.SubElement(tv_node, 'uniqueid', 
**kwargs) uniqueid.text = '%s%s' % (('', 'tt')[TVINFO_IMDB == tvid], mid) if not has_id: - logger.log('Incomplete info for show with id %s on %s, skipping it' % (show_ID, sickgear.TVInfoAPI( + logger.log('Incomplete info for show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI( show_obj.tvid).name), logger.ERROR) return False diff --git a/sickgear/name_cache.py b/sickgear/name_cache.py index 1ecac246..c7225a27 100644 --- a/sickgear/name_cache.py +++ b/sickgear/name_cache.py @@ -32,7 +32,7 @@ sceneNameCache = {} nameCacheLock = threading.Lock() -def addNameToCache(name, tvid=0, prodid=0, season=-1): +def add_name_to_cache(name, tvid=0, prodid=0, season=-1): """Adds the show & tvdb id to the namecache :param name: the show name to cache @@ -41,7 +41,7 @@ def addNameToCache(name, tvid=0, prodid=0, season=-1): :type tvid: int :param prodid: the production id that this show should be cached with (can be None/0 for unknown) :type prodid: int or long - :param season: the season the the name exception belongs to. -1 for generic exception + :param season: the season the name exception belongs to. -1 for generic exception :type season: int """ global nameCache @@ -53,7 +53,7 @@ def addNameToCache(name, tvid=0, prodid=0, season=-1): nameCache[name] = [int(tvid), int(prodid), season] -def retrieveNameFromCache(name): +def retrieve_name_from_cache(name): # type: (AnyStr) -> Union[Tuple[int, int], Tuple[None, None]] """Looks up the given name in the name cache @@ -71,7 +71,7 @@ def retrieveNameFromCache(name): return None, None -def buildNameCache(show_obj=None, update_only_scene=False): +def build_name_cache(show_obj=None, update_only_scene=False): # type: (Optional[Union[TVShow, TVShowBase]], bool) -> None """Adds all new name exceptions to the namecache memory and flushes any removed name exceptions @@ -104,7 +104,7 @@ def buildNameCache(show_obj=None, update_only_scene=False): for cur_so in sickgear.showList if cur_so]) sceneNameCache = {} - cacheDB = db.DBConnection() + cache_db = db.DBConnection() cache_results = [] if update_only_scene: @@ -117,7 +117,7 @@ def buildNameCache(show_obj=None, update_only_scene=False): tmp_scene_name_cache = sceneNameCache.copy() for t, s in iteritems(show_ids): - cache_results += cacheDB.select( + cache_results += cache_db.select( 'SELECT show_name, indexer AS tv_id, indexer_id AS prod_id, season' ' FROM scene_exceptions' ' WHERE indexer = %s AND indexer_id IN (%s)' % (t, ','.join(['%s' % i for i in s]))) diff --git a/sickgear/name_parser/parser.py b/sickgear/name_parser/parser.py index a1d37109..c1769f1b 100644 --- a/sickgear/name_parser/parser.py +++ b/sickgear/name_parser/parser.py @@ -260,7 +260,7 @@ class NameParser(object): if 'extra_info' in named_groups: tmp_extra_info = match.group('extra_info') - # Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season + # Show.S04.Special or Show.S05.Part.2.Extras are almost certainly not every episode in the season if tmp_extra_info and 'season_only' == cur_regex_name and re.search( r'([. _-]|^)(special|extra)s?\w*([. 
_-]|$)', tmp_extra_info, re.I): continue @@ -292,7 +292,7 @@ class NameParser(object): matches.append(result) if len(matches): - # pick best match with highest score based on placement + # pick best match with the highest score based on placement best_result = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score) show_obj = None @@ -326,7 +326,7 @@ class NameParser(object): # get quality new_name = helpers.remove_non_release_groups(name, show_obj.is_anime) - best_result.quality = common.Quality.nameQuality(new_name, show_obj.is_anime) + best_result.quality = common.Quality.name_quality(new_name, show_obj.is_anime) new_episode_numbers = [] new_season_numbers = [] @@ -451,7 +451,7 @@ class NameParser(object): 'SickGear does not support this. ' 'Sorry.' % (str(new_season_numbers))) - # I guess it's possible that we'd have duplicate episodes too, so lets + # I guess it's possible that we'd have duplicate episodes too, so let's # eliminate them new_episode_numbers = list(set(new_episode_numbers)) new_episode_numbers.sort() @@ -500,20 +500,20 @@ class NameParser(object): if not second: return getattr(first, attr) - a = getattr(first, attr, []) - b = getattr(second, attr) + first_val = getattr(first, attr, []) + second_val = getattr(second, attr) - # if a is good use it - if None is not a or (isinstance(a, list) and len(a)): - return a + # if first_val is good use it + if None is not first_val or (isinstance(first_val, list) and len(first_val)): + return first_val # if not use b (if b isn't set it'll just be default) - return b + return second_val @staticmethod - def _unicodify(obj, encoding='utf-8'): + def _unicodify(obj, encoding='utf8'): if isinstance(obj, text_type): try: - return obj.encode('latin1').decode('utf8') + return obj.encode('latin1').decode(encoding) except (BaseException, Exception): pass return obj diff --git a/sickgear/name_parser/regexes.py b/sickgear/name_parser/regexes.py index 9a6b30db..85df55a5 100644 --- a/sickgear/name_parser/regexes.py +++ b/sickgear/name_parser/regexes.py @@ -14,7 +14,7 @@ # You should have received a copy of the GNU General Public License # along with SickGear. If not, see . -# all regexes are case insensitive +# all regexes are case-insensitive normal_regexes = [ ('garbage_name', diff --git a/sickgear/naming.py b/sickgear/naming.py index 9a24e43c..6d34d227 100644 --- a/sickgear/naming.py +++ b/sickgear/naming.py @@ -109,7 +109,7 @@ class TVEpisodeSample(tv.TVEpisode): self.scene_absolute_number = absolute_number # type: int self._airdate = datetime.date(2010, 3, 9) # type: datetime.date self.show_obj = TVShowSample() # type: TVShowSample - self._status = Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV) # type: int + self._status = Quality.composite_status(common.DOWNLOADED, common.Quality.SDTV) # type: int self._release_name = 'Show.Name.S02E03.HDTV.XviD-RLSGROUP' # type: AnyStr self._is_proper = True # type: bool self._version = 2 # type: int @@ -196,7 +196,7 @@ def check_valid_abd_naming(pattern=None): def check_valid_sports_naming(pattern=None): """ - Checks if the name is can be parsed back to its original form for an sports format. + Checks if the name is can be parsed back to its original form for a sports format. Returns true if the naming is valid, false if not. 
:param pattern: String Naming Pattern @@ -294,7 +294,7 @@ def generate_sample_ep(multi=None, abd=False, sports=False, anime=False, anime_t # make a fake episode object sample_ep_obj = TVEpisodeSample(2, 3, 3, 'Ep Name') - sample_ep_obj._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV) + sample_ep_obj._status = Quality.composite_status(DOWNLOADED, Quality.HDTV) sample_ep_obj._airdate = datetime.date(2011, 3, 9) if abd: @@ -313,14 +313,14 @@ def generate_sample_ep(multi=None, abd=False, sports=False, anime=False, anime_t if None is not multi: sample_ep_obj._name = 'Ep Name (1)' second_ep = TVEpisodeSample(2, 4, 4, 'Ep Name (2)') - second_ep._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV) + second_ep._status = Quality.composite_status(DOWNLOADED, Quality.HDTV) normal_naming = not anime or 3 == anime_type release_name = sample_ep_obj._release_name = second_ep._release_name = \ ('Show.Name.003-004.HDTV.XviD-RLSGROUP', 'Show.Name.S02E03E04E05.HDTV.XviD-RLSGROUP')[normal_naming] sample_ep_obj.related_ep_obj.append(second_ep) if normal_naming: third_ep = TVEpisodeSample(2, 5, 5, 'Ep Name (3)') - third_ep._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV) + third_ep._status = Quality.composite_status(DOWNLOADED, Quality.HDTV) third_ep._release_name = release_name sample_ep_obj.related_ep_obj.append(third_ep) else: diff --git a/sickgear/network_timezones.py b/sickgear/network_timezones.py index 961b9511..c6c548ce 100644 --- a/sickgear/network_timezones.py +++ b/sickgear/network_timezones.py @@ -36,7 +36,7 @@ if False: from _23 import DirEntry from typing import AnyStr, Optional, Tuple, Union -# regex to parse time (12/24 hour format) +# regex to parse time (12/24-hour format) time_regex = re.compile(r'(\d{1,2})(([:.](\d{2}))? ?([PA][. ]? ?M)|[:.](\d{2}))\b', flags=re.I) am_regex = re.compile(r'(A[. ]? ?M)', flags=re.I) pm_regex = re.compile(r'(P[. ]? ?M)', flags=re.I) @@ -174,7 +174,7 @@ def _update_zoneinfo(): url_data = helpers.get_url(url) if None is url_data: update_last_retry() - # when None is urlData, trouble connecting to github + # when None is urlData, trouble connecting to GitHub logger.log(u'Fetching zoneinfo.txt failed, this can happen from time to time. Unable to get URL: %s' % url, logger.WARNING) return @@ -263,13 +263,13 @@ def update_network_dict(): network_tz_data = {} - # network timezones are stored on github pages + # network timezones are stored on GitHub pages url = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/network_timezones.txt' url_data = helpers.get_url(url) if url_data in (None, ''): update_last_retry() - # When None is urlData, trouble connecting to github + # When None is urlData, trouble connecting to GitHub logger.debug(u'Updating network timezones failed, this can happen from time to time. 
URL: %s' % url) load_network_dict(load=False) return @@ -413,7 +413,7 @@ def parse_time(time_of_day): hour = helpers.try_int(time_parsed.group(1)) mins = helpers.try_int(time_parsed.group(4)) ampm = time_parsed.group(5) - # convert am/pm to 24 hour clock + # convert am/pm to 24-hour clock if None is not ampm: if None is not pm_regex.search(ampm) and 12 != hour: hour += 12 @@ -505,13 +505,13 @@ def _load_network_conversions(): conversions_in = [] - # network conversions are stored on github pages + # network conversions are stored on GitHub pages url = 'https://raw.githubusercontent.com/prinz23/sg_network_conversions/master/conversions.txt' url_data = helpers.get_url(url) if url_data in (None, ''): update_last_retry() - # when no url_data, trouble connecting to github + # when no url_data, trouble connecting to GitHub logger.debug(u'Updating network conversions failed, this can happen from time to time. URL: %s' % url) return diff --git a/sickgear/nzbSplitter.py b/sickgear/nzbSplitter.py index da179d7c..7ac6cfe8 100644 --- a/sickgear/nzbSplitter.py +++ b/sickgear/nzbSplitter.py @@ -40,7 +40,7 @@ SUBJECT_FN_MATCHER = re.compile(r'"([^"]*)"') RE_NORMAL_NAME = re.compile(r'\.\w{1,5}$') -def platform_encode(p): +def _platform_encode(p): """ Return Unicode name, if not already Unicode, decode with UTF-8 or latin1 """ try: return decode_str(p) @@ -48,17 +48,17 @@ def platform_encode(p): return decode_str(p, sickgear.SYS_ENCODING, errors='replace').replace('?', '!') -def name_extractor(subject): +def _name_extractor(subject): """ Try to extract a file name from a subject line, return `subject` if in doubt """ result = subject for name in re.findall(SUBJECT_FN_MATCHER, subject): name = name.strip(' "') if name and RE_NORMAL_NAME.search(name): result = name - return platform_encode(result) + return _platform_encode(result) -def getSeasonNZBs(name, url_data, season): +def _get_season_nzbs(name, url_data, season): """ :param name: name @@ -71,31 +71,31 @@ def getSeasonNZBs(name, url_data, season): :rtype: Tuple[Dict, AnyStr] """ try: - showXML = etree.ElementTree(etree.XML(url_data)) + show_xml = etree.ElementTree(etree.XML(url_data)) except SyntaxError: logger.log(u'Unable to parse the XML of %s, not splitting it' % name, logger.ERROR) return {}, '' filename = name.replace('.nzb', '') - nzbElement = showXML.getroot() + nzb_element = show_xml.getroot() regex = r'([\w\._\ ]+)[\._ ]S%02d[\._ ]([\w\._\-\ ]+)' % season - sceneNameMatch = re.search(regex, filename, re.I) - if sceneNameMatch: - showName, qualitySection = sceneNameMatch.groups() + scene_name_match = re.search(regex, filename, re.I) + if scene_name_match: + show_name, quality_section = scene_name_match.groups() else: logger.log('%s - Not a valid season pack scene name. If it\'s a valid one, log a bug.' 
% name, logger.ERROR) return {}, '' - regex = r'(%s[\._]S%02d(?:[E0-9]+)\.[\w\._]+)' % (re.escape(showName), season) + regex = r'(%s[\._]S%02d(?:[E0-9]+)\.[\w\._]+)' % (re.escape(show_name), season) regex = regex.replace(' ', '.') ep_files = {} xmlns = None - for cur_file in list(nzbElement): + for cur_file in list(nzb_element): if not isinstance(cur_file.tag, string_types): continue xmlns_match = re.match(r'[{](https?://[A-Za-z0-9_./]+/nzb)[}]file', cur_file.tag) @@ -108,7 +108,7 @@ def getSeasonNZBs(name, url_data, season): # print curFile.get("subject"), "doesn't match", regex continue cur_ep = match.group(1) - fn = name_extractor(cur_file.get('subject', '')) + fn = _name_extractor(cur_file.get('subject', '')) if cur_ep == re.sub(r'\+\d+\.par2$', '', fn, flags=re.I): bn, ext = os.path.splitext(fn) cur_ep = re.sub(r'\.(part\d+|vol\d+(\+\d+)?)$', '', bn, flags=re.I) @@ -126,7 +126,7 @@ def getSeasonNZBs(name, url_data, season): return ep_files, xmlns -def createNZBString(file_elements, xmlns): +def _create_nzb_string(file_elements, xmlns): """ :param file_elements: first element @@ -134,17 +134,17 @@ def createNZBString(file_elements, xmlns): :return: :rtype: AnyStr """ - rootElement = etree.Element("nzb") + root_element = etree.Element("nzb") if xmlns: - rootElement.set("xmlns", xmlns) + root_element.set("xmlns", xmlns) for curFile in file_elements: - rootElement.append(stripNS(curFile, xmlns)) + root_element.append(_strip_ns(curFile, xmlns)) - return etree.tostring(rootElement, encoding='utf-8') + return etree.tostring(root_element, encoding='utf-8') -def saveNZB(nzb_name, nzb_string): +def _save_nzb(nzb_name, nzb_string): """ :param nzb_name: nzb name @@ -160,15 +160,15 @@ def saveNZB(nzb_name, nzb_string): logger.log(u'Unable to save NZB: ' + ex(e), logger.ERROR) -def stripNS(element, ns): +def _strip_ns(element, ns): element.tag = element.tag.replace("{" + ns + "}", "") for curChild in list(element): - stripNS(curChild, ns) + _strip_ns(curChild, ns) return element -def splitResult(result): +def split_result(result): """ :param result: search result @@ -195,7 +195,7 @@ def splitResult(result): # bust it up season = parse_result.season_number if None is not parse_result.season_number else 1 - separate_nzbs, xmlns = getSeasonNZBs(result.name, resp, season) + separate_nzbs, xmlns = _get_season_nzbs(result.name, resp, season) result_list = [] @@ -246,7 +246,7 @@ def splitResult(result): nzb_result.provider = result.provider nzb_result.quality = result.quality nzb_result.show_obj = result.show_obj - nzb_result.extraInfo = [createNZBString(separate_nzbs[new_nzb], xmlns)] + nzb_result.extraInfo = [_create_nzb_string(separate_nzbs[new_nzb], xmlns)] result_list.append(nzb_result) diff --git a/sickgear/people_queue.py b/sickgear/people_queue.py index 77a3716e..0e99721c 100644 --- a/sickgear/people_queue.py +++ b/sickgear/people_queue.py @@ -154,7 +154,7 @@ class PeopleQueueActions(object): class PeopleQueueItem(generic_queue.QueueItem): def __init__(self, action_id, show_obj, uid=None, force=False, **kwargs): - # type: (integer_types, TVShow, AnyStr, bool, Dict) -> PeopleQueueItem + # type: (integer_types, TVShow, AnyStr, bool, Dict) -> None """ :param action_id: @@ -172,7 +172,7 @@ class PeopleQueueItem(generic_queue.QueueItem): class CastQueueItem(PeopleQueueItem): def __init__(self, show_obj, show_info_cast=None, uid=None, force=False, scheduled_update=False, switch=False, **kwargs): - # type: (TVShow, CastList, AnyStr, bool, bool, bool, Dict) -> CastQueueItem + # type: (TVShow, CastList, 
AnyStr, bool, bool, bool, Dict) -> None """ :param show_obj: show obj diff --git a/sickgear/postProcessor.py b/sickgear/postProcessor.py index ac08f1df..48501e63 100644 --- a/sickgear/postProcessor.py +++ b/sickgear/postProcessor.py @@ -762,7 +762,7 @@ class PostProcessor(object): # if there is a quality available in the status then we don't need to bother guessing from the filename if ep_obj.status in common.Quality.SNATCHED_ANY: - old_status, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) + old_status, ep_quality = common.Quality.split_composite_status(ep_obj.status) if common.Quality.UNKNOWN != ep_quality: self._log( u'Using "%s" quality from the old status' % common.Quality.qualityStrings[ep_quality], @@ -779,7 +779,7 @@ class PostProcessor(object): if not cur_name: continue - ep_quality = common.Quality.nameQuality(cur_name, ep_obj.show_obj.is_anime) + ep_quality = common.Quality.name_quality(cur_name, ep_obj.show_obj.is_anime) quality_log = u' "%s" quality parsed from the %s %s'\ % (common.Quality.qualityStrings[ep_quality], thing, cur_name) @@ -790,14 +790,14 @@ class PostProcessor(object): else: self._log(u'Found' + quality_log, logger.DEBUG) - ep_quality = common.Quality.fileQuality(self.file_path) + ep_quality = common.Quality.file_quality(self.file_path) if common.Quality.UNKNOWN != ep_quality: self._log(u'Using "%s" quality parsed from the metadata file content of %s' % (common.Quality.qualityStrings[ep_quality], self.file_name), logger.DEBUG) return ep_quality # Try guessing quality from the file name - ep_quality = common.Quality.assumeQuality(self.file_name) + ep_quality = common.Quality.assume_quality(self.file_name) self._log(u'Using guessed "%s" quality from the file name %s' % (common.Quality.qualityStrings[ep_quality], self.file_name), logger.DEBUG) @@ -889,7 +889,7 @@ class PostProcessor(object): self._log(u'SickGear snatched this episode, marking it safe to replace', logger.DEBUG) return True - old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) + old_ep_status, old_ep_quality = common.Quality.split_composite_status(ep_obj.status) # if old episode is not downloaded/archived then it's safe if common.DOWNLOADED != old_ep_status and common.ARCHIVED != old_ep_status: @@ -1002,10 +1002,10 @@ class PostProcessor(object): cur_ep_obj.release_name = self.release_name or '' - any_qualities, best_qualities = common.Quality.splitQuality(cur_ep_obj.show_obj.quality) - cur_status, cur_quality = common.Quality.splitCompositeStatus(cur_ep_obj.status) + any_qualities, best_qualities = common.Quality.split_quality(cur_ep_obj.show_obj.quality) + cur_status, cur_quality = common.Quality.split_composite_status(cur_ep_obj.status) - cur_ep_obj.status = common.Quality.compositeStatus( + cur_ep_obj.status = common.Quality.composite_status( **({'status': common.DOWNLOADED, 'quality': quality}, {'status': common.ARCHIVED, 'quality': quality}) [cur_ep_obj.status in common.Quality.SNATCHED_BEST or @@ -1111,7 +1111,7 @@ class PostProcessor(object): # set the status of the episodes # for cur_ep_obj in [ep_obj] + ep_obj.related_ep_obj: - # cur_ep_obj.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality) + # cur_ep_obj.status = common.Quality.composite_status(common.SNATCHED, new_ep_quality) # if the show directory doesn't exist then make it if allowed if not os.path.isdir(ep_obj.show_obj.location) and sickgear.CREATE_MISSING_SHOW_DIRS: diff --git a/sickgear/properFinder.py b/sickgear/properFinder.py index 5c3f899a..9d66fd5d 100644 
--- a/sickgear/properFinder.py +++ b/sickgear/properFinder.py @@ -73,7 +73,7 @@ def search_propers(provider_proper_obj=None): proper_sch = sickgear.proper_finder_scheduler if None is proper_sch.start_time: - run_in = proper_sch.lastRun + proper_sch.cycleTime - datetime.datetime.now() + run_in = proper_sch.last_run + proper_sch.cycle_time - datetime.datetime.now() run_at = ', next check ' if datetime.timedelta() > run_in: run_at += 'imminent' @@ -131,7 +131,7 @@ def get_old_proper_level(show_obj, tvid, prodid, season, episode_numbers, old_st [tvid, prodid, season, episode]) if not result or not isinstance(result[0]['resource'], string_types) or not result[0]['resource']: continue - nq = Quality.sceneQuality(result[0]['resource'], show_obj.is_anime) + nq = Quality.scene_quality(result[0]['resource'], show_obj.is_anime) if nq != new_quality: continue try: @@ -214,7 +214,7 @@ def load_webdl_types(): def _search_provider(cur_provider, provider_propers, aired_since_shows, recent_shows, recent_anime): # type: (GenericProvider, List, datetime.datetime, List[Tuple[int, int]], List[Tuple[int, int]]) -> None try: - # we need to extent the referenced list from parameter to update the original var + # we need to extend the referenced list from parameter to update the original var provider_propers.extend(cur_provider.find_propers(search_date=aired_since_shows, shows=recent_shows, anime=recent_anime)) except AuthException as e: @@ -253,7 +253,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime # 2. native proper search: active search enabled providers provider_list = list(filter( lambda p: p.is_active() and (p.enable_recentsearch, p.enable_backlog)[None is proper_dict], - sickgear.providers.sortedProviderList())) + sickgear.providers.sorted_sources())) search_threads = [] if None is proper_dict: @@ -362,8 +362,8 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime # only keep the Proper if we already retrieved the same quality ep (don't get better/worse ones) # check if we want this release: same quality as current, current has correct status # restrict other release group releases to Proper's - old_status, old_quality = Quality.splitCompositeStatus(int(sql_result[0]['status'])) - cur_proper.quality = Quality.nameQuality(cur_proper.name, parse_result.is_anime) + old_status, old_quality = Quality.split_composite_status(int(sql_result[0]['status'])) + cur_proper.quality = Quality.name_quality(cur_proper.name, parse_result.is_anime) cur_proper.is_repack, cur_proper.properlevel = Quality.get_proper_level( parse_result.extra_info_no_name(), parse_result.version, parse_result.is_anime, check_is_repack=True) cur_proper.proper_level = cur_proper.properlevel # local non global value @@ -631,7 +631,7 @@ def get_needed_qualites(needed=None): continue ep_obj = show_obj.get_episode(season=cur_result['season'], episode=cur_result['episode']) if ep_obj: - ep_status, ep_quality = Quality.splitCompositeStatus(ep_obj.status) + ep_status, ep_quality = Quality.split_composite_status(ep_obj.status) if ep_status in SNATCHED_ANY + [DOWNLOADED, ARCHIVED]: needed.check_needed_qualities([ep_quality]) @@ -699,7 +699,7 @@ def _set_last_proper_search(when): def next_proper_timeleft(): - return sickgear.proper_finder_scheduler.timeLeft() + return sickgear.proper_finder_scheduler.time_left() def get_last_proper_search(): diff --git a/sickgear/providers/__init__.py b/sickgear/providers/__init__.py index 5ba75287..1695162f 100644 --- a/sickgear/providers/__init__.py +++ b/sickgear/providers/__init__.py 
@@ -29,6 +29,7 @@ if False: from typing import AnyStr, List, Union from .generic import GenericProvider, NZBProvider, TorrentProvider +# noinspection PyUnresolvedReferences __all__ = [ # usenet 'filesharingtalk', @@ -55,41 +56,41 @@ for module in __all__: raise e -def sortedProviderList(): +def sorted_sources(): # type: (...) -> List[Union[GenericProvider, NZBProvider, TorrentProvider]] """ return sorted provider list :return: sorted list of providers """ - initialList = sickgear.providerList + sickgear.newznabProviderList + sickgear.torrentRssProviderList - providerDict = dict(zip([x.get_id() for x in initialList], initialList)) + initial_list = sickgear.provider_list + sickgear.newznab_providers + sickgear.torrent_rss_providers + provider_dict = dict(zip([x.get_id() for x in initial_list], initial_list)) - newList = [] + new_list = [] # add all modules in the priority list, in order for curModule in sickgear.PROVIDER_ORDER: - if curModule in providerDict: - newList.append(providerDict[curModule]) + if curModule in provider_dict: + new_list.append(provider_dict[curModule]) if not sickgear.PROVIDER_ORDER: - nzb = list(filter(lambda p: p.providerType == generic.GenericProvider.NZB, itervalues(providerDict))) - tor = list(filter(lambda p: p.providerType != generic.GenericProvider.NZB, itervalues(providerDict))) - newList = sorted(filter(lambda p: not p.anime_only, nzb), key=lambda v: v.get_id()) + \ + nzb = list(filter(lambda p: p.providerType == generic.GenericProvider.NZB, itervalues(provider_dict))) + tor = list(filter(lambda p: p.providerType != generic.GenericProvider.NZB, itervalues(provider_dict))) + new_list = sorted(filter(lambda p: not p.anime_only, nzb), key=lambda v: v.get_id()) + \ sorted(filter(lambda p: not p.anime_only, tor), key=lambda v: v.get_id()) + \ sorted(filter(lambda p: p.anime_only, nzb), key=lambda v: v.get_id()) + \ sorted(filter(lambda p: p.anime_only, tor), key=lambda v: v.get_id()) # add any modules that are missing from that list - for curModule in providerDict: - if providerDict[curModule] not in newList: - newList.append(providerDict[curModule]) + for curModule in provider_dict: + if provider_dict[curModule] not in new_list: + new_list.append(provider_dict[curModule]) - return newList + return new_list -def makeProviderList(): - return [x.provider for x in [getProviderModule(y) for y in __all__] if x] +def provider_modules(): + return [x.provider for x in [_get_module_by_name(y) for y in __all__] if x] def generic_provider_name(n): @@ -102,7 +103,7 @@ def generic_provider_url(u): return u.strip().strip('/').lower().replace('https', 'http') -def make_unique_list(p_list, d_list=None): +def _make_unique_list(p_list, d_list=None): # type: (List, List) -> List """ remove provider duplicates @@ -135,32 +136,32 @@ def make_unique_list(p_list, d_list=None): return new_p_list -def getNewznabProviderList(data): +def newznab_source_list(data): # type: (AnyStr) -> List - defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')] - providerList = make_unique_list(list(filter(lambda _x: _x, [makeNewznabProvider(x) for x in data.split('!!!')])), - defaultList) + default_list = [_create_newznab_source(x) for x in _default_newznab_sources().split('!!!')] + provider_list = _make_unique_list(list(filter( + lambda _x: _x, [_create_newznab_source(x) for x in data.split('!!!')])), default_list) - providerDict = dict(zip([x.name for x in providerList], providerList)) + provider_dict = dict(zip([x.name for x in provider_list], provider_list)) - for 
curDefault in defaultList: + for curDefault in default_list: if not curDefault: continue - if curDefault.name not in providerDict: + if curDefault.name not in provider_dict: curDefault.default = True - providerList.append(curDefault) + provider_list.append(curDefault) else: - providerDict[curDefault.name].default = True + provider_dict[curDefault.name].default = True for k in ('name', 'url', 'needs_auth', 'search_mode', 'search_fallback', 'enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog', 'server_type'): - setattr(providerDict[curDefault.name], k, getattr(curDefault, k)) + setattr(provider_dict[curDefault.name], k, getattr(curDefault, k)) - return list(filter(lambda _x: _x, providerList)) + return list(filter(lambda _x: _x, provider_list)) -def makeNewznabProvider(config_string): +def _create_newznab_source(config_string): if not config_string: return None @@ -181,19 +182,19 @@ def makeNewznabProvider(config_string): newznab_module = sys.modules['sickgear.providers.newznab'] - newProvider = newznab_module.NewznabProvider(name, url, **params) - newProvider.enabled = '1' == enabled + new_provider = newznab_module.NewznabProvider(name, url, **params) + new_provider.enabled = '1' == enabled - return newProvider + return new_provider -def getTorrentRssProviderList(data): - providerList = list(filter(lambda _x: _x, [makeTorrentRssProvider(x) for x in data.split('!!!')])) +def torrent_rss_source_list(data): + provider_list = list(filter(lambda _x: _x, [_create_torrent_rss_source(x) for x in data.split('!!!')])) - return list(filter(lambda _x: _x, providerList)) + return list(filter(lambda _x: _x, provider_list)) -def makeTorrentRssProvider(config_string): +def _create_torrent_rss_source(config_string): if not config_string: return None @@ -217,25 +218,27 @@ def makeTorrentRssProvider(config_string): return None try: - torrentRss = sys.modules['sickgear.providers.rsstorrent'] + torrent_rss = sys.modules['sickgear.providers.rsstorrent'] except (BaseException, Exception): return - newProvider = torrentRss.TorrentRssProvider(name, url, cookies, search_mode, search_fallback, enable_recentsearch, - enable_backlog) - newProvider.enabled = '1' == enabled + new_provider = torrent_rss.TorrentRssProvider(name, url, cookies, search_mode, search_fallback, enable_recentsearch, + enable_backlog) + new_provider.enabled = '1' == enabled - return newProvider + return new_provider -def getDefaultNewznabProviders(): - return '!!!'.join(['NZBgeek|https://api.nzbgeek.info/||5030,5040|0|eponly|0|0|0', - 'DrunkenSlug|https://api.drunkenslug.com/||5030,5040|0|eponly|0|0|0', - 'NinjaCentral|https://ninjacentral.co.za/||5030,5040|0|eponly|0|0|0', - ]) +def _default_newznab_sources(): + return '!!!'.join([ + '|'.join(_src) for _src in + (['NZBgeek', 'https://api.nzbgeek.info/', '', '5030,5040', '0', 'eponly', '0', '0', '0'], + ['DrunkenSlug', 'https://api.drunkenslug.com/', '', '5030,5040', '0', 'eponly', '0', '0', '0'], + ['NinjaCentral', 'https://ninjacentral.co.za/', '', '5030,5040', '0', 'eponly', '0', '0', '0'], + )]) -def getProviderModule(name): +def _get_module_by_name(name): prefix, cprov, name = 'sickgear.providers.', 'motsuc'[::-1], name.lower() if name in __all__ and prefix + name in sys.modules: return sys.modules[prefix + name] @@ -244,11 +247,11 @@ def getProviderModule(name): raise Exception('Can\'t find %s%s in providers' % (prefix, name)) -def getProviderClass(provider_id): - providerMatch = [x for x in - sickgear.providerList + sickgear.newznabProviderList + 
sickgear.torrentRssProviderList if - provider_id == x.get_id()] +def get_by_id(provider_id): + provider_match = [x for x in + sickgear.provider_list + sickgear.newznab_providers + sickgear.torrent_rss_providers if + provider_id == x.get_id()] - if 1 != len(providerMatch): + if 1 != len(provider_match): return None - return providerMatch[0] + return provider_match[0] diff --git a/sickgear/providers/btn.py b/sickgear/providers/btn.py index 7af84475..be0fb5da 100644 --- a/sickgear/providers/btn.py +++ b/sickgear/providers/btn.py @@ -367,7 +367,7 @@ class BTNCache(tvcache.TVCache): def _cache_data(self, **kwargs): - return self.provider.cache_data(age=self._getLastUpdate().timetuple(), min_time=self.update_iv) + return self.provider.cache_data(age=self._get_last_update().timetuple(), min_time=self.update_iv) provider = BTNProvider() diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py index dad4c46f..a75600be 100644 --- a/sickgear/providers/generic.py +++ b/sickgear/providers/generic.py @@ -502,7 +502,7 @@ class GenericProvider(object): if log_warning: # Ensure provider name output (e.g. when displaying config/provs) instead of e.g. thread "Tornado" prepend = ('[%s] :: ' % self.name, '')[any([x.name in threading.current_thread().name - for x in sickgear.providers.sortedProviderList()])] + for x in sickgear.providers.sorted_sources()])] logger.log('%sToo many requests reached at %s, waiting for %s' % ( prepend, self.fmt_delta(self.tmr_limit_time), self.fmt_delta(time_left)), logger.WARNING) return use_tmr_limit @@ -544,8 +544,8 @@ class GenericProvider(object): :param url: Address where to fetch data from :param skip_auth: Skip authentication check of provider if True :param use_tmr_limit: An API limit can be +ve before a fetch, but unwanted, set False to short should_skip - :param args: params to pass-through to get_url - :param kwargs: keyword params to pass-through to get_url + :param args: params to pass through to get_url + :param kwargs: keyword params to pass through to get_url :return: None or data fetched from URL """ data = None @@ -641,7 +641,7 @@ class GenericProvider(object): :param name: name :return: """ - return re.sub(r'[^\w\d_]', '_', name.strip().lower()) + return re.sub(r'[^\w_]', '_', name.strip().lower()) def image_name(self, *default_name): # type: (...) -> AnyStr @@ -672,7 +672,7 @@ class GenericProvider(object): rxc_delim = re.compile(r'[&;]') rxc_skip_key = re.compile(r'clearance') - for cur_p in sickgear.providers.sortedProviderList(): + for cur_p in sickgear.providers.sorted_sources(): pid = cur_p.get_id() auths = set([]) for cur_kt in ['password', 'passkey', 'api_key', 'key', 'digest', 'cookies', 'hash']: @@ -755,7 +755,7 @@ class GenericProvider(object): def is_enabled(self): # type: (...) -> bool """ - This should be overridden and should return the config setting eg. sickgear.MYPROVIDER + This should be overridden and should return the config setting e.g. 
sickgear.MYPROVIDER """ return self.enabled @@ -804,7 +804,7 @@ class GenericProvider(object): try: btih = None try: - btih = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0] + btih = re.findall(r'urn:btih:(\w{32,40})', result.url)[0] if 32 == len(btih): btih = make_btih(btih) except (BaseException, Exception): @@ -927,7 +927,7 @@ class GenericProvider(object): def search_rss(self, ep_obj_list): # type: (List[TVEpisode]) -> Dict[TVEpisode, SearchResult] - return self.cache.findNeededEpisodes(ep_obj_list) + return self.cache.find_needed_episodes(ep_obj_list) def get_quality(self, item, anime=False): # type: (etree.Element, bool) -> int @@ -939,7 +939,7 @@ class GenericProvider(object): :return: a Quality value obtained from the node's data """ (title, url) = self._title_and_url(item) - quality = Quality.sceneQuality(title, anime) + quality = Quality.scene_quality(title, anime) return quality def _search_provider(self, search_params, search_mode='eponly', epcount=0, age=0, **kwargs): @@ -1008,7 +1008,7 @@ class GenericProvider(object): all_cells = all_cells if any(all_cells) else header_row.find_all('td') headers = [re.sub( - r'[\s]+', '', + r'\s+', '', ((any([cell.get_text()]) and any([rc[x].search(cell.get_text()) for x in iterkeys(rc)]) and cell.get_text()) or (cell.attrs.get('id') and any([rc[x].search(cell['id']) for x in iterkeys(rc)]) and cell['id']) or (cell.attrs.get('title') and any([rc[x].search(cell['title']) for x in iterkeys(rc)]) and cell['title']) @@ -1103,7 +1103,7 @@ class GenericProvider(object): search_list = [] for cur_ep_obj in ep_obj_list: # search cache for episode result - cache_result = self.cache.searchCache(cur_ep_obj, manual_search) # type: List[SearchResult] + cache_result = self.cache.search_cache(cur_ep_obj, manual_search) # type: List[SearchResult] if cache_result: if cur_ep_obj.episode not in results: results[cur_ep_obj.episode] = cache_result @@ -1348,7 +1348,7 @@ class GenericProvider(object): :param kwargs: :return: """ - results = self.cache.listPropers(search_date) + results = self.cache.list_propers(search_date) return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show_obj) for x in results] @@ -1458,7 +1458,7 @@ class GenericProvider(object): except IndexError: return None try: - value *= 1024 ** ['b', 'k', 'm', 'g', 't'].index(re.findall('([tgmk])[i]?b', size_dim.lower())[0]) + value *= 1024 ** ['b', 'k', 'm', 'g', 't'].index(re.findall('([tgmk])i?b', size_dim.lower())[0]) except IndexError: pass return int(math.ceil(value)) @@ -1531,7 +1531,7 @@ class NZBProvider(GenericProvider): :param kwargs: :return: """ - cache_results = self.cache.listPropers(search_date) + cache_results = self.cache.list_propers(search_date) results = [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show_obj) for x in cache_results] @@ -1708,7 +1708,7 @@ class TorrentProvider(GenericProvider): else: # noinspection PyUnresolvedReferences name = item.title - return Quality.sceneQuality(name, anime) + return Quality.scene_quality(name, anime) @staticmethod def _reverse_quality(quality): @@ -1829,7 +1829,7 @@ class TorrentProvider(GenericProvider): prefix = ([prefix], prefix)[isinstance(prefix, list)] search_params = [] - crop = re.compile(r'([.\s])(?:\1)+') + crop = re.compile(r'([.\s])\1+') for name in get_show_names_all_possible(self.show_obj, scenify=process_name and getattr(self, 'scene', True), season=season): for detail in ep_detail: @@ -1965,7 +1965,7 @@ class 
TorrentProvider(GenericProvider): seen_attr = 'PROVIDER_SEEN' setattr(sickgear, seen_attr, list(filter(lambda u: self.__module__ not in u, - getattr(sickgear, seen_attr, [])))) + getattr(sickgear, seen_attr, [])))) self.failure_count = 3 * bool(failure_count) if self.should_skip(): @@ -2160,7 +2160,7 @@ class TorrentProvider(GenericProvider): if self.should_skip(log_warning=False): break - proper_check = re.compile(r'(?i)(?:%s)' % clean_term.sub('', proper_term)) + proper_check = re.compile(r'(?i)%s' % clean_term.sub('', proper_term)) for item in items: if self.should_skip(log_warning=False): break diff --git a/sickgear/providers/newznab.py b/sickgear/providers/newznab.py index 69fbfdd4..fc701941 100644 --- a/sickgear/providers/newznab.py +++ b/sickgear/providers/newznab.py @@ -347,7 +347,7 @@ class NewznabProvider(generic.NZBProvider): caps[NewznabConstants.SEARCH_SEASON] = 'season' if NewznabConstants.SEARCH_EPISODE not in caps or not caps.get(NewznabConstants.SEARCH_EPISODE): caps[NewznabConstants.SEARCH_TEXT] = 'ep' - if (TVINFO_TVRAGE not in caps or not caps.get(TVINFO_TVRAGE)): + if TVINFO_TVRAGE not in caps or not caps.get(TVINFO_TVRAGE): caps[TVINFO_TVRAGE] = 'rid' if NewznabConstants.SEARCH_TEXT not in caps or not caps.get(NewznabConstants.SEARCH_TEXT): caps[NewznabConstants.SEARCH_TEXT] = 'q' @@ -645,7 +645,7 @@ class NewznabProvider(generic.NZBProvider): if not getattr(s, 'wanted_quality', None): # this should not happen, the creation is missing for the search in this case logger.log('wanted_quality property was missing for search, creating it', logger.WARNING) - ep_status, ep_quality = Quality.splitCompositeStatus(ep_obj.status) + ep_status, ep_quality = Quality.split_composite_status(ep_obj.status) s.wanted_quality = get_wanted_qualities(ep_obj, ep_status, ep_quality, unaired=True) needed.check_needed_qualities(s.wanted_quality) @@ -682,14 +682,14 @@ class NewznabProvider(generic.NZBProvider): needed.check_needed_types(ep_obj.show_obj) if not ep_obj.show_obj.is_anime and not ep_obj.show_obj.is_sports: if not getattr(ep_obj, 'wanted_quality', None): - ep_status, ep_quality = Quality.splitCompositeStatus(ep_obj.status) + ep_status, ep_quality = Quality.split_composite_status(ep_obj.status) ep_obj.wanted_quality = get_wanted_qualities(ep_obj, ep_status, ep_quality, unaired=True) needed.check_needed_qualities(ep_obj.wanted_quality) else: if not ep_obj.show_obj.is_anime and not ep_obj.show_obj.is_sports: for cur_ep_obj in ep_obj_list: if not getattr(cur_ep_obj, 'wanted_quality', None): - ep_status, ep_quality = Quality.splitCompositeStatus(cur_ep_obj.status) + ep_status, ep_quality = Quality.split_composite_status(cur_ep_obj.status) cur_ep_obj.wanted_quality = get_wanted_qualities(cur_ep_obj, ep_status, ep_quality, unaired=True) needed.check_needed_qualities(cur_ep_obj.wanted_quality) @@ -733,7 +733,7 @@ class NewznabProvider(generic.NZBProvider): continue # search cache for episode result - cache_result = self.cache.searchCache(ep_obj, manual_search) + cache_result = self.cache.search_cache(ep_obj, manual_search) if cache_result: if ep_obj.episode not in results: results[ep_obj.episode] = cache_result @@ -1070,7 +1070,7 @@ class NewznabProvider(generic.NZBProvider): :param kwargs: :return: """ - cache_results = self.cache.listPropers(search_date) + cache_results = self.cache.list_propers(search_date) results = [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show_obj) for x in cache_results] @@ -1183,10 +1183,10 @@ class 
NewznabCache(tvcache.TVCache): root = elem return root, ns - def updateCache(self, - needed=NeededQualities(need_all=True), # type: NeededQualities - **kwargs - ): + def update_cache(self, + needed=NeededQualities(need_all=True), # type: NeededQualities + **kwargs + ): """ :param needed: needed qualites class @@ -1195,7 +1195,7 @@ class NewznabCache(tvcache.TVCache): if 4489 != sickgear.RECENTSEARCH_INTERVAL or self.should_update(): n_spaces = {} try: - check = self._checkAuth() + check = self.check_auth() if isinstance(check, bool) and not check: items = None else: @@ -1205,12 +1205,12 @@ class NewznabCache(tvcache.TVCache): items = None if items: - self._clearCache() + self.clear_cache() # parse data cl = [] for item in items: - ci = self._parseItem(n_spaces, item) + ci = self.parse_item(n_spaces, item) if None is not ci: cl.append(ci) @@ -1219,7 +1219,7 @@ class NewznabCache(tvcache.TVCache): my_db.mass_action(cl) # set updated as time the attempt to fetch data is - self.setLastUpdate() + self.set_last_update() @staticmethod def parse_ids(item, ns): @@ -1240,7 +1240,7 @@ class NewznabCache(tvcache.TVCache): return ids # overwrite method with that parses the rageid from the newznab feed - def _parseItem(self, + def parse_item(self, ns, # type: Dict item # type: etree.Element ): # type: (...) -> Union[List[AnyStr, List[Any]], None] diff --git a/sickgear/providers/snowfl.py b/sickgear/providers/snowfl.py index eb7986cc..25f46c3a 100644 --- a/sickgear/providers/snowfl.py +++ b/sickgear/providers/snowfl.py @@ -164,7 +164,7 @@ class SnowflProvider(generic.TorrentProvider): from sickgear import providers if 'torlock' in url.lower(): prov = next(filter(lambda p: 'torlock' == p.name.lower(), (filter( - lambda sp: sp.providerType == self.providerType, providers.sortedProviderList())))) + lambda sp: sp.providerType == self.providerType, providers.sorted_sources())))) state = prov.enabled prov.enabled = True _ = prov.url diff --git a/sickgear/sab.py b/sickgear/sab.py index f6d5496a..8efa531b 100644 --- a/sickgear/sab.py +++ b/sickgear/sab.py @@ -118,7 +118,7 @@ def access_method(host): def test_authentication(host=None, username=None, password=None, apikey=None): """ - Sends a simple API request to SAB to determine if the given connection information is connect + Sends a simple API request to SAB to determine if the given connection information is correct Returns: A tuple containing the success boolean and a message :param host: The host where SAB is running (incl port) diff --git a/sickgear/scene_exceptions.py b/sickgear/scene_exceptions.py index 9aa9591d..b7ee204a 100644 --- a/sickgear/scene_exceptions.py +++ b/sickgear/scene_exceptions.py @@ -318,7 +318,7 @@ def retrieve_exceptions(): if cl: my_db.mass_action(cl) - name_cache.buildNameCache(update_only_scene=True) + name_cache.build_name_cache(update_only_scene=True) # since this could invalidate the results of the cache we clear it out after updating if changed_exceptions: @@ -369,7 +369,7 @@ def update_scene_exceptions(tvid, prodid, scene_exceptions): ' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)', [tvid, prodid, cur_exception, cur_season]) - sickgear.name_cache.buildNameCache(update_only_scene=True) + sickgear.name_cache.build_name_cache(update_only_scene=True) def _custom_exceptions_fetcher(): diff --git a/sickgear/scene_numbering.py b/sickgear/scene_numbering.py index a93d4776..cccb4abc 100644 --- a/sickgear/scene_numbering.py +++ b/sickgear/scene_numbering.py @@ -45,8 +45,8 @@ def get_scene_numbering(tvid, prodid, season, 
episode, fallback_to_xem=True, sho returns the TVDB numbering. (so the return values will always be set) - kwargs['scene_result']: type: Optional[List[Row]] passed thru - kwargs['show_result']: type: Optional[List[Row]] passed thru + kwargs['scene_result']: type: Optional[List[Row]] passed through + kwargs['show_result']: type: Optional[List[Row]] passed through :param tvid: tvid :type tvid: int @@ -134,8 +134,8 @@ def get_scene_absolute_numbering(tvid, prodid, absolute_number, season, episode, returns the TVDB numbering. (so the return values will always be set) - kwargs['scene_result']: type: Optional[List[Row]] passed thru - kwargs['show_result']: type: Optional[List[Row]] passed thru + kwargs['scene_result']: type: Optional[List[Row]] passed through + kwargs['show_result']: type: Optional[List[Row]] passed through :param tvid: tvid :type tvid: int diff --git a/sickgear/scheduler.py b/sickgear/scheduler.py index b310ef81..990df34c 100644 --- a/sickgear/scheduler.py +++ b/sickgear/scheduler.py @@ -26,17 +26,17 @@ from exceptions_helper import ex class Scheduler(threading.Thread): - def __init__(self, action, cycleTime=datetime.timedelta(minutes=10), run_delay=datetime.timedelta(minutes=0), - start_time=None, threadName="ScheduledThread", silent=True, prevent_cycle_run=None, paused=False): + def __init__(self, action, cycle_time=datetime.timedelta(minutes=10), run_delay=datetime.timedelta(minutes=0), + start_time=None, thread_name="ScheduledThread", silent=True, prevent_cycle_run=None, paused=False): super(Scheduler, self).__init__() - self.lastRun = datetime.datetime.now() + run_delay - cycleTime + self.last_run = datetime.datetime.now() + run_delay - cycle_time self.action = action - self.cycleTime = cycleTime + self.cycle_time = cycle_time self.start_time = start_time self.prevent_cycle_run = prevent_cycle_run - self.name = threadName + self.name = thread_name self.silent = silent self._stopper = threading.Event() self._unpause = threading.Event() @@ -65,10 +65,10 @@ class Scheduler(threading.Thread): else: self.unpause() - def timeLeft(self): - return self.cycleTime - (datetime.datetime.now() - self.lastRun) + def time_left(self): + return self.cycle_time - (datetime.datetime.now() - self.last_run) - def forceRun(self): + def force_run(self): if not self.action.amActive: self.force = True return True @@ -93,15 +93,15 @@ class Scheduler(threading.Thread): should_run = False # check if interval has passed - if current_time - self.lastRun >= self.cycleTime: + if current_time - self.last_run >= self.cycle_time: # check if wanting to start around certain time taking interval into account if self.start_time: hour_diff = current_time.time().hour - self.start_time.hour - if not hour_diff < 0 and hour_diff < self.cycleTime.seconds // 3600: + if not hour_diff < 0 and hour_diff < self.cycle_time.seconds // 3600: should_run = True else: - # set lastRun to only check start_time after another cycleTime - self.lastRun = current_time + # set last_run to only check start_time after another cycle_time + self.last_run = current_time else: should_run = True @@ -110,13 +110,13 @@ class Scheduler(threading.Thread): if should_run and ((self.prevent_cycle_run is not None and self.prevent_cycle_run()) or getattr(self.action, 'prevent_run', False)): - logger.log(u'%s skipping this cycleTime' % self.name, logger.WARNING) - # set lastRun to only check start_time after another cycleTime - self.lastRun = current_time + logger.log(u'%s skipping this cycle_time' % self.name, logger.WARNING) + # set last_run to only 
check start_time after another cycle_time + self.last_run = current_time should_run = False if should_run: - self.lastRun = current_time + self.last_run = current_time try: if not self.silent: diff --git a/sickgear/search.py b/sickgear/search.py index c7609512..d7c87fc1 100644 --- a/sickgear/search.py +++ b/sickgear/search.py @@ -165,9 +165,9 @@ def snatch_episode(result, end_status=SNATCHED): for cur_ep_obj in result.ep_obj_list: with cur_ep_obj.lock: if is_first_best_match(cur_ep_obj.status, result): - cur_ep_obj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality) + cur_ep_obj.status = Quality.composite_status(SNATCHED_BEST, result.quality) else: - cur_ep_obj.status = Quality.compositeStatus(end_status, result.quality) + cur_ep_obj.status = Quality.composite_status(end_status, result.quality) item = cur_ep_obj.get_sql() if None is not item: @@ -354,7 +354,7 @@ def is_final_result(result): Checks if the given result is good enough quality that we can stop searching for other ones. :param result: search result to check - :return: If the result is the highest quality in both the any/best quality lists then this function + :return: If the result is the highest quality in both any and best quality lists then this function returns True, if not then it's False """ @@ -362,7 +362,7 @@ def is_final_result(result): show_obj = result.ep_obj_list[0].show_obj - any_qualities, best_qualities = Quality.splitQuality(show_obj.quality) + any_qualities, best_qualities = Quality.split_quality(show_obj.quality) # if there is a download that's higher than this then we definitely need to keep looking if best_qualities and max(best_qualities) > result.quality: @@ -378,11 +378,11 @@ def is_final_result(result): elif best_qualities and max(best_qualities) == result.quality: - # if this is the best download but we have a higher initial download then keep looking + # if this is the best download, but we have a higher initial download then keep looking if any_qualities and max(any_qualities) > result.quality: return False - # if this is the best download and we don't have a higher initial download then we're done + # if this is the best download, and we don't have a higher initial download then we're done return True # if we got here than it's either not on the lists, they're empty, or it's lower than the highest required @@ -392,7 +392,7 @@ def is_final_result(result): def is_first_best_match(ep_status, result): # type: (int, sickgear.classes.SearchResult) -> bool """ - Checks if the given result is a best quality match and if we want to archive the episode on first match. + Checks if the given result is the best quality match and if we want to archive the episode on first match. 
:param ep_status: current episode object status :param result: search result to check @@ -403,11 +403,11 @@ def is_first_best_match(ep_status, result): result.name, logger.DEBUG) show_obj = result.ep_obj_list[0].show_obj - cur_status, cur_quality = Quality.splitCompositeStatus(ep_status) + cur_status, cur_quality = Quality.split_composite_status(ep_status) - any_qualities, best_qualities = Quality.splitQuality(show_obj.quality) + any_qualities, best_qualities = Quality.split_quality(show_obj.quality) - # if there is a download that's a match to one of our best qualities and + # if there is a download that's a match to one of our best qualities, and # we want to archive the episode then we are done if best_qualities and show_obj.upgrade_once and \ (result.quality in best_qualities and @@ -433,7 +433,7 @@ def set_wanted_aired(ep_obj, # type: TVEpisode :param ep_count_scene: count of episodes in scene seasons :param manual: manual search """ - ep_status, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) + ep_status, ep_quality = common.Quality.split_composite_status(ep_obj.status) ep_obj.wanted_quality = get_wanted_qualities(ep_obj, ep_status, ep_quality, unaired=unaired, manual=manual) ep_obj.eps_aired_in_season = ep_count.get(ep_obj.season, 0) ep_obj.eps_aired_in_scene_season = ep_count_scene.get( @@ -458,7 +458,7 @@ def get_wanted_qualities(ep_obj, # type: TVEpisode """ if isinstance(ep_obj, TVEpisode): return sickgear.WANTEDLIST_CACHE.get_wantedlist(ep_obj.show_obj.quality, ep_obj.show_obj.upgrade_once, - cur_quality, cur_status, unaired, manual) + cur_quality, cur_status, unaired, manual) return [] @@ -543,7 +543,7 @@ def wanted_episodes(show_obj, # type: TVShow for result in sql_result: ep_obj = show_obj.get_episode(int(result['season']), int(result['episode']), ep_result=ep_sql_result) - cur_status, cur_quality = common.Quality.splitCompositeStatus(ep_obj.status) + cur_status, cur_quality = common.Quality.split_composite_status(ep_obj.status) ep_obj.wanted_quality = get_wanted_qualities(ep_obj, cur_status, cur_quality, unaired=unaired) if not ep_obj.wanted_quality: continue @@ -589,7 +589,7 @@ def search_for_needed_episodes(ep_obj_list): orig_thread_name = threading.current_thread().name - providers = list(filter(lambda x: x.is_active() and x.enable_recentsearch, sickgear.providers.sortedProviderList())) + providers = list(filter(lambda x: x.is_active() and x.enable_recentsearch, sickgear.providers.sorted_sources())) for cur_provider in providers: threading.current_thread().name = '%s :: [%s]' % (orig_thread_name, cur_provider.name) @@ -615,7 +615,7 @@ def search_for_needed_episodes(ep_obj_list): logger.log(u'All found results for %s were rejected.' 
% cur_ep_obj.pretty_name(), logger.DEBUG) continue - # if it's already in the list (from another provider) and the newly found quality is no better then skip it + # if it's already in the list (from another provider) and the newly found quality is no better, then skip it if cur_ep_obj in found_results and best_result.quality <= found_results[cur_ep_obj].quality: continue @@ -632,7 +632,7 @@ def search_for_needed_episodes(ep_obj_list): found_results[cur_ep_obj] = best_result try: - cur_provider.save_list() + cur_provider.fails.save_list() except (BaseException, Exception): pass @@ -718,7 +718,7 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m logger.log(u'Performing season pack search for %s' % show_obj.unique_name) try: - provider.cache._clearCache() + provider.cache.clear_cache() search_result_list = provider.find_search_results(show_obj, ep_obj_list, search_mode, manual_search, try_other_searches=try_other_searches) if any(search_result_list): @@ -766,7 +766,7 @@ def cache_torrent_file( # type: (...) -> Optional[TorrentSearchResult] cache_file = os.path.join(sickgear.CACHE_DIR or helpers.get_system_temp_dir(), - '%s.torrent' % (helpers.sanitize_filename(search_result.name))) + '%s.torrent' % (helpers.sanitize_filename(search_result.name))) if not helpers.download_file( search_result.url, cache_file, session=search_result.provider.session, failure_monitor=False): @@ -840,7 +840,7 @@ def search_providers( orig_thread_name = threading.current_thread().name - provider_list = [x for x in sickgear.providers.sortedProviderList() if x.is_active() and + provider_list = [x for x in sickgear.providers.sorted_sources() if x.is_active() and getattr(x, 'enable_backlog', None) and (not torrent_only or GenericProvider.TORRENT == x.providerType) and (not scheduled or getattr(x, 'enable_scheduled_backlog', None))] @@ -878,7 +878,7 @@ def search_providers( if provider_id not in found_results or not len(found_results[provider_id]): continue - any_qualities, best_qualities = Quality.splitQuality(show_obj.quality) + any_qualities, best_qualities = Quality.split_quality(show_obj.quality) # pick the best season NZB best_season_result = None @@ -918,8 +918,8 @@ def search_providers( else: any_wanted = True - # if we need every ep in the season and there's nothing better then just download this and - # be done with it (unless single episodes are preferred) + # if we need every ep in the season and there's nothing better, + # then download this and be done with it (unless single episodes are preferred) if all_wanted and highest_quality_overall == best_season_result.quality: logger.log(u'Every episode in this season is needed, downloading the whole %s %s' % (best_season_result.provider.providerType, best_season_result.name)) @@ -938,7 +938,7 @@ def search_providers( logger.log(u'Breaking apart the NZB and adding the individual ones to our results', logger.DEBUG) # if not, break it apart and add them as the lowest priority results - individual_results = nzbSplitter.splitResult(best_season_result) + individual_results = nzbSplitter.split_result(best_season_result) for cur_result in filter( lambda r: r.show_obj == show_obj and show_name_helpers.pass_wordlist_checks( @@ -985,7 +985,7 @@ def search_providers( logger.log(u'Checking usefulness of multi episode result [%s]' % multi_result.name, logger.DEBUG) if sickgear.USE_FAILED_DOWNLOADS and failed_history.has_failed(multi_result.name, multi_result.size, - multi_result.provider.name): + multi_result.provider.name): 
logger.log(u'Rejecting previously failed multi episode result [%s]' % multi_result.name) continue @@ -1057,7 +1057,7 @@ def search_providers( found_results[provider_id][cur_search_result][0].ep_obj_list[0]) or \ found_results[provider_id][cur_search_result][0].ep_obj_list[0].status if old_status: - status, quality = Quality.splitCompositeStatus(old_status) + status, quality = Quality.split_composite_status(old_status) use_quality_list = (status not in ( common.WANTED, common.FAILED, common.UNAIRED, common.SKIPPED, common.IGNORED, common.UNKNOWN)) @@ -1093,7 +1093,7 @@ def search_providers( best_result.after_get_data_func(best_result) best_result.after_get_data_func = None # consume only once - # add result if its not a duplicate + # add result if it's not a duplicate found = False for i, result in enumerate(final_results): for best_result_ep in best_result.ep_obj_list: diff --git a/sickgear/search_backlog.py b/sickgear/search_backlog.py index eb659770..fa603986 100644 --- a/sickgear/search_backlog.py +++ b/sickgear/search_backlog.py @@ -47,29 +47,29 @@ class BacklogSearchScheduler(scheduler.Scheduler): self.force = True def next_run(self): - if 1 >= self.action._lastBacklog: + if 1 >= self.action.last_backlog: return datetime.date.today() - elif (self.action._lastBacklog + self.action.cycleTime) < datetime.date.today().toordinal(): + elif (self.action.last_backlog + self.action.cycle_time) < datetime.date.today().toordinal(): return datetime.date.today() - return datetime.date.fromordinal(self.action._lastBacklog + self.action.cycleTime) + return datetime.date.fromordinal(self.action.last_backlog + self.action.cycle_time) def next_backlog_timeleft(self): now = datetime.datetime.now() - torrent_enabled = 0 < len([x for x in sickgear.providers.sortedProviderList() if x.is_active() and + torrent_enabled = 0 < len([x for x in sickgear.providers.sorted_sources() if x.is_active() and getattr(x, 'enable_backlog', None) and GenericProvider.TORRENT == x.providerType]) - if now > self.action.nextBacklog or self.action.nextCyleTime != self.cycleTime: - nextruntime = now + self.timeLeft() + if now > self.action.nextBacklog or self.action.nextCyleTime != self.cycle_time: + nextruntime = now + self.time_left() if not torrent_enabled: nextpossibleruntime = (datetime.datetime.fromtimestamp(self.action.last_runtime) + datetime.timedelta(hours=23)) for _ in moves.xrange(5): if nextruntime > nextpossibleruntime: self.action.nextBacklog = nextruntime - self.action.nextCyleTime = self.cycleTime + self.action.nextCyleTime = self.cycle_time break - nextruntime += self.cycleTime + nextruntime += self.cycle_time else: - self.action.nextCyleTime = self.cycleTime + self.action.nextCyleTime = self.cycle_time self.action.nextBacklog = nextruntime return self.action.nextBacklog - now if self.action.nextBacklog > now else datetime.timedelta(seconds=0) @@ -77,8 +77,8 @@ class BacklogSearchScheduler(scheduler.Scheduler): class BacklogSearcher(object): def __init__(self): - self._lastBacklog = self._get_last_backlog() - self.cycleTime = sickgear.BACKLOG_PERIOD + self.last_backlog = self._get_last_backlog() + self.cycle_time = sickgear.BACKLOG_PERIOD self.lock = threading.Lock() self.amActive = False # type: bool self.amPaused = False # type: bool @@ -175,7 +175,7 @@ class BacklogSearcher(object): :param scheduled: scheduled backlog search (can be from webif or scheduler) :return: any provider is active for given backlog """ - return 0 < len([x for x in sickgear.providers.sortedProviderList() if x.is_active() and + return 0 
< len([x for x in sickgear.providers.sorted_sources() if x.is_active() and getattr(x, 'enable_backlog', None) and (not torrent_only or GenericProvider.TORRENT == x.providerType) and (not scheduled or getattr(x, 'enable_scheduled_backlog', None))]) @@ -214,7 +214,7 @@ class BacklogSearcher(object): any_torrent_enabled = any(map( lambda x: x.is_active() and getattr(x, 'enable_backlog', None) and GenericProvider.TORRENT == x.providerType, - sickgear.providers.sortedProviderList())) + sickgear.providers.sorted_sources())) if not any_torrent_enabled: logger.log('Last scheduled backlog run was within the last day, skipping this run.', logger.DEBUG) return @@ -383,8 +383,8 @@ class BacklogSearcher(object): if last_backlog > datetime.date.today().toordinal(): last_backlog = 1 - self._lastBacklog = last_backlog - return self._lastBacklog + self.last_backlog = last_backlog + return self.last_backlog @staticmethod def _set_last_backlog(when): diff --git a/sickgear/search_queue.py b/sickgear/search_queue.py index 62844ac8..88d430d2 100644 --- a/sickgear/search_queue.py +++ b/sickgear/search_queue.py @@ -22,12 +22,10 @@ import re import threading import traceback -import exceptions_helper # noinspection PyPep8Naming from exceptions_helper import ex import sickgear -from lib.dateutil import tz from . import common, db, failed_history, generic_queue, helpers, \ history, logger, network_timezones, properFinder, search, ui from .classes import Proper, SimpleNamespace @@ -519,7 +517,7 @@ class RecentSearchQueueItem(generic_queue.QueueItem): threads = [] providers = list(filter(lambda x: x.is_active() and x.enable_recentsearch, - sickgear.providers.sortedProviderList())) + sickgear.providers.sorted_sources())) for cur_provider in providers: if not cur_provider.cache.should_update(): continue @@ -528,7 +526,7 @@ class RecentSearchQueueItem(generic_queue.QueueItem): logger.log('Updating provider caches with recent upload data') # spawn a thread for each provider to save time waiting for slow response providers - threads.append(threading.Thread(target=cur_provider.cache.updateCache, + threads.append(threading.Thread(target=cur_provider.cache.update_cache, kwargs={'needed': needed}, name='%s :: [%s]' % (orig_thread_name, cur_provider.name))) # start the thread we just created @@ -645,7 +643,7 @@ class ManualSearchQueueItem(BaseSearchQueueItem): ep_count, ep_count_scene = get_aired_in_season(self.show_obj) set_wanted_aired(self.segment, True, ep_count, ep_count_scene, manual=True) if not getattr(self.segment, 'wanted_quality', None): - ep_status, ep_quality = common.Quality.splitCompositeStatus(self.segment.status) + ep_status, ep_quality = common.Quality.split_composite_status(self.segment.status) self.segment.wanted_quality = search.get_wanted_qualities(self.segment, ep_status, ep_quality, unaired=True, manual=True) if not self.segment.wanted_quality: diff --git a/sickgear/sgdatetime.py b/sickgear/sgdatetime.py index 043bedb0..f963c76d 100644 --- a/sickgear/sgdatetime.py +++ b/sickgear/sgdatetime.py @@ -27,7 +27,7 @@ from six import integer_types, string_types # noinspection PyUnreachableCode if False: - from typing import Callable, Optional, Union + from typing import Optional, Union date_presets = ('%Y-%m-%d', '%a, %Y-%m-%d', @@ -234,7 +234,7 @@ class SGDatetime(datetime.datetime): """ convert datetime to filetime special handling for windows filetime issues - for pre Windows 7 this can result in an exception for pre 1970 dates + for pre Windows 7 this can result in an exception for pre-1970 dates """ obj = 
(dt, self)[self is not None] # type: datetime.datetime if is_win: @@ -286,6 +286,7 @@ class SGDatetime(datetime.datetime): # noinspection PyUnreachableCode if False: # just to trick pycharm in correct type detection + # noinspection PyUnusedLocal def timestamp_near(d_t): # type: (datetime.datetime) -> float pass diff --git a/sickgear/show_name_helpers.py b/sickgear/show_name_helpers.py index b0c00027..0ee26627 100644 --- a/sickgear/show_name_helpers.py +++ b/sickgear/show_name_helpers.py @@ -264,7 +264,7 @@ def make_scene_season_search_string(show_obj, # type: sickgear.tv.TVShow ep_obj_list = show_obj.get_all_episodes(ep_obj.season) # get show qualities - any_qualities, best_qualities = common.Quality.splitQuality(show_obj.quality) + any_qualities, best_qualities = common.Quality.split_quality(show_obj.quality) # compile a list of all the episode numbers we need in this 'season' season_strings = [] @@ -272,7 +272,7 @@ def make_scene_season_search_string(show_obj, # type: sickgear.tv.TVShow # get quality of the episode cur_composite_status = episode.status - cur_status, cur_quality = common.Quality.splitCompositeStatus(cur_composite_status) + cur_status, cur_quality = common.Quality.split_composite_status(cur_composite_status) if best_qualities: highest_best_quality = max(best_qualities) @@ -378,7 +378,7 @@ def all_possible_show_names(show_obj, season=-1, force_anime=False): # type: (sickgear.tv.TVShow, int, bool) -> List[AnyStr] """ Figures out every possible variation of the name for a particular show. Includes TVDB name, TVRage name, - country codes on the end, eg. "Show Name (AU)", and any scene exception names. + country codes on the end, e.g. "Show Name (AU)", and any scene exception names. :param show_obj: a TVShow object that we should get the names of :param season: season @@ -387,7 +387,7 @@ def all_possible_show_names(show_obj, season=-1, force_anime=False): """ show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:] - if not show_names: # if we dont have any season specific exceptions fallback to generic exceptions + if not show_names: # if we don't have any season specific exceptions fallback to generic exceptions season = -1 show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:] diff --git a/sickgear/show_queue.py b/sickgear/show_queue.py index 70a6cc6d..03046c93 100644 --- a/sickgear/show_queue.py +++ b/sickgear/show_queue.py @@ -931,7 +931,7 @@ class QueueItemAdd(ShowQueueItem): wanted_updates.append({'season': sr['season'], 'episode': sr['episode'], 'status': sr['status']}) elif sr['status'] not in [WANTED]: - cur_status, cur_quality = Quality.splitCompositeStatus(int(sr['status'])) + cur_status, cur_quality = Quality.split_composite_status(int(sr['status'])) if sickgear.WANTEDLIST_CACHE.get_wantedlist( self.quality, self.upgrade_once, cur_quality, cur_status, unaired=(sickgear.SEARCH_UNAIRED and not sickgear.UNAIRED_RECENT_SEARCH_ONLY)): @@ -1155,7 +1155,7 @@ class QueueItemAdd(ShowQueueItem): raise # update internal name cache - name_cache.buildNameCache(self.show_obj) + name_cache.build_name_cache(self.show_obj) self.show_obj.load_episodes_from_db() @@ -1446,7 +1446,7 @@ class QueueItemUpdate(ShowQueueItem): for cur_season in db_ep_obj_list: for cur_episode in db_ep_obj_list[cur_season]: ep_obj = self.show_obj.get_episode(cur_season, cur_episode) # type: Optional[TVEpisode] - status = sickgear.common.Quality.splitCompositeStatus(ep_obj.status)[0] + status = 
sickgear.common.Quality.split_composite_status(ep_obj.status)[0] if self.switch or should_delete_episode(status): if self.switch: cl.append(self.show_obj.switch_ep_change_sql( diff --git a/sickgear/show_updater.py b/sickgear/show_updater.py index 901b431c..9d6970be 100644 --- a/sickgear/show_updater.py +++ b/sickgear/show_updater.py @@ -220,7 +220,7 @@ class ShowUpdater(object): if len(pi_list): sickgear.show_queue_scheduler.action.daily_update_running = True - ui.ProgressIndicators.setIndicator('dailyUpdate', ui.QueueProgressIndicator('Daily Update', pi_list)) + ui.ProgressIndicators.set_indicator('dailyUpdate', ui.QueueProgressIndicator('Daily Update', pi_list)) logger.log(u'Added all shows to show queue for full update') diff --git a/sickgear/traktChecker.py b/sickgear/traktChecker.py deleted file mode 100644 index 851ed124..00000000 --- a/sickgear/traktChecker.py +++ /dev/null @@ -1,222 +0,0 @@ -# Author: Frank Fenton -# -# This file is part of SickGear. -# -# SickGear is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# SickGear is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with SickGear. If not, see . - -import datetime -import os -import traceback - -import sickgear -from . import helpers, logger, search_queue -from .common import SKIPPED, WANTED -from .indexers.indexer_config import TVINFO_TVRAGE - - -class TraktChecker(object): - def __init__(self): - self.todoWanted = [] - - def run(self, force=False): - try: - # add shows from trakt.tv watchlist - if sickgear.TRAKT_USE_WATCHLIST: - self.todoWanted = [] # its about to all get re-added - if len(sickgear.ROOT_DIRS.split('|')) < 2: - logger.log(u"No default root directory", logger.ERROR) - return - self.updateShows() - self.updateEpisodes() - - # sync trakt.tv library with SickGear library - if sickgear.TRAKT_SYNC: - self.syncLibrary() - except Exception: - logger.log(traceback.format_exc(), logger.DEBUG) - - def findShow(self, tvid, prodid): - library = TraktCall("user/library/shows/all.json/%API%/" + sickgear.TRAKT_USERNAME, sickgear.TRAKT_API, sickgear.TRAKT_USERNAME, sickgear.TRAKT_PASSWORD) - - if library == 'NULL': - logger.log(u"No shows found in your library, aborting library update", logger.DEBUG) - return - - if not library: - logger.log(u"Could not connect to trakt service, aborting library check", logger.ERROR) - return - - return filter(lambda x: int(prodid) in [int(x['tvdb_id']) or 0, int(x['tvrage_id'])] or 0, library) - - def syncLibrary(self): - logger.log(u"Syncing Trakt.tv show library", logger.DEBUG) - - for cur_show_obj in sickgear.showList: - self.addShowToTraktLibrary(cur_show_obj) - - def removeShowFromTraktLibrary(self, show_obj): - data = {} - if self.findShow(show_obj.tvid, show_obj.prodid): - # URL parameters - data['tvdb_id'] = helpers.mapIndexersToShow(show_obj)[1] - data['title'] = show_obj.name - data['year'] = show_obj.startyear - - if len(data): - logger.log(u"Removing " + show_obj.name + " from trakt.tv library", logger.DEBUG) - TraktCall("show/unlibrary/%API%", sickgear.TRAKT_API, sickgear.TRAKT_USERNAME, sickgear.TRAKT_PASSWORD, - data) - - def 
addShowToTraktLibrary(self, show_obj): - """ - Sends a request to trakt indicating that the given show and all its episodes is part of our library. - - show_obj: The TVShow object to add to trakt - """ - - data = {} - - if not self.findShow(show_obj.tvid, show_obj.prodid): - # URL parameters - data['tvdb_id'] = helpers.mapIndexersToShow(show_obj)[1] - data['title'] = show_obj.name - data['year'] = show_obj.startyear - - if len(data): - logger.log(u"Adding " + show_obj.name + " to trakt.tv library", logger.DEBUG) - TraktCall("show/library/%API%", sickgear.TRAKT_API, sickgear.TRAKT_USERNAME, sickgear.TRAKT_PASSWORD, - data) - - def updateShows(self): - logger.log(u"Starting trakt show watchlist check", logger.DEBUG) - watchlist = TraktCall("user/watchlist/shows.json/%API%/" + sickgear.TRAKT_USERNAME, sickgear.TRAKT_API, sickgear.TRAKT_USERNAME, sickgear.TRAKT_PASSWORD) - - if watchlist == 'NULL': - logger.log(u"No shows found in your watchlist, aborting watchlist update", logger.DEBUG) - return - - if not watchlist: - logger.log(u"Could not connect to trakt service, aborting watchlist update", logger.ERROR) - return - - for show in watchlist: - tvid = int(sickgear.TRAKT_DEFAULT_INDEXER) - prodid = int(show[('tvdb_id', 'tvrage_id')[TVINFO_TVRAGE == tvid]]) - - if int(sickgear.TRAKT_METHOD_ADD) != 2: - self.addDefaultShow(tvid, prodid, show["title"], SKIPPED) - else: - self.addDefaultShow(tvid, prodid, show["title"], WANTED) - - if int(sickgear.TRAKT_METHOD_ADD) == 1: - show_obj = helpers.find_show_by_id({tvid: prodid}) - if None is not show_obj: - self.setEpisodeToWanted(show_obj, 1, 1) - else: - self.todoWanted.append((prodid, 1, 1)) - - def updateEpisodes(self): - """ - Sets episodes to wanted that are in trakt watchlist - """ - logger.log(u"Starting trakt episode watchlist check", logger.DEBUG) - watchlist = TraktCall("user/watchlist/episodes.json/%API%/" + sickgear.TRAKT_USERNAME, sickgear.TRAKT_API, sickgear.TRAKT_USERNAME, sickgear.TRAKT_PASSWORD) - - if watchlist == 'NULL': - logger.log(u"No episodes found in your watchlist, aborting watchlist update", logger.DEBUG) - return - - if not watchlist: - logger.log(u"Could not connect to trakt service, aborting watchlist update", logger.ERROR) - return - - for show in watchlist: - tvid = int(sickgear.TRAKT_DEFAULT_INDEXER) - prodid = int(show[('tvdb_id', 'tvrage_id')[TVINFO_TVRAGE == tvid]]) - - self.addDefaultShow(tvid, prodid, show['title'], SKIPPED) - show_obj = helpers.find_show_by_id({tvid: prodid}) - - try: - if show_obj and show_obj.tvid == tvid: - for episode in show["episodes"]: - if None is not show_obj: - self.setEpisodeToWanted(show_obj, episode["season"], episode["number"]) - else: - self.todoWanted.append((prodid, episode["season"], episode["number"])) - except TypeError: - logger.log(u"Could not parse the output from trakt for " + show["title"], logger.DEBUG) - - def addDefaultShow(self, tvid, prod_id, name, status): - """ - Adds a new show with the default settings - """ - if not helpers.find_show_by_id({int(tvid): int(prodid)}): - logger.log(u"Adding show " + str(prod_id)) - root_dirs = sickgear.ROOT_DIRS.split('|') - - try: - location = root_dirs[int(root_dirs[0]) + 1] - except: - location = None - - if location: - showPath = os.path.join(location, helpers.sanitize_filename(name)) - dir_exists = helpers.make_dir(showPath) - if not dir_exists: - logger.log(u"Unable to create the folder " + showPath + ", can't add the show", logger.ERROR) - return - else: - helpers.chmod_as_parent(showPath) - - 
sickgear.show_queue_scheduler.action.add_show( - int(tvid), int(prod_id), showPath, - quality=int(sickgear.QUALITY_DEFAULT), - paused=sickgear.TRAKT_START_PAUSED, default_status=status, - flatten_folders=int(sickgear.FLATTEN_FOLDERS_DEFAULT) - ) - else: - logger.log(u"There was an error creating the show, no root directory setting found", logger.ERROR) - return - - def setEpisodeToWanted(self, show_obj, s, e): - """ - Sets an episode to wanted, only is it is currently skipped - """ - ep_obj = show_obj.get_episode(int(s), int(e)) - if ep_obj: - - with ep_obj.lock: - if ep_obj.status != SKIPPED or ep_obj.airdate == datetime.date.fromordinal(1): - return - - logger.log(u"Setting episode s" + str(s) + "e" + str(e) + " of show " + show_obj.name + " to wanted") - # figure out what segment the episode is in and remember it so we can backlog it - - ep_obj.status = WANTED - ep_obj.save_to_db() - - backlog_queue_item = search_queue.BacklogQueueItem(show_obj, [ep_obj]) - sickgear.search_queue_scheduler.action.add_item(backlog_queue_item) - - logger.log(u"Starting backlog for " + show_obj.name + " season " + str( - s) + " episode " + str(e) + " because some eps were set to wanted") - - def manageNewShow(self, show_obj): - logger.log(u"Checking if trakt watch list wants to search for episodes from new show " + show_obj.name, - logger.DEBUG) - episodes = [i for i in self.todoWanted if i[0] == show_obj.prodid] - for episode in episodes: - self.todoWanted.remove(episode) - self.setEpisodeToWanted(show_obj, episode[1], episode[2]) diff --git a/sickgear/tv.py b/sickgear/tv.py index 7edb23d2..73143391 100644 --- a/sickgear/tv.py +++ b/sickgear/tv.py @@ -43,7 +43,7 @@ import sickgear from . import db, helpers, history, image_cache, indexermapper, logger, \ name_cache, network_timezones, notifiers, postProcessor, subtitles from .anime import AniGroupList -from .classes import weakList +from .classes import WeakList from .common import Quality, statusStrings, \ ARCHIVED, DOWNLOADED, FAILED, IGNORED, SKIPPED, SNATCHED, SNATCHED_ANY, SNATCHED_PROPER, UNAIRED, UNKNOWN, WANTED, \ NAMING_DUPLICATE, NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED, NAMING_SEPARATED_REPEAT @@ -280,7 +280,7 @@ def usable_id(value): def usable_rid(value): # type: (Union[AnyStr]) -> Optional[AnyStr] """ - return value if is a id:format is valid + return value if is an id:format is valid otherwise None if value fails basic id format validation """ if isinstance(value, string_types) and ':' in value: @@ -452,7 +452,7 @@ class Person(Referential): def reset(self, person_obj=None): # type: (TVInfoPerson) -> None """ - reset all properties with the exception of: name, id, ids + reset all properties except; name, id, ids :param person_obj: TVInfo Person object to reset to """ @@ -790,7 +790,7 @@ class Person(Referential): for i in (TVINFO_TRAKT, TVINFO_IMDB, TVINFO_TMDB, TVINFO_TVMAZE, TVINFO_TVDB): if not rp.ids.get(i): continue - # in case it's the current source use it's id and lock if from being changed + # in case it's the current source use its id and lock if from being changed if cur_tv_info_src == i and rp.ids.get(i): source_confirmed[i] = True if rp.ids.get(i) != self.ids.get(i): @@ -1403,8 +1403,8 @@ class TVShow(TVShowBase): @cast_list.setter def cast_list(self, value): - # type: (weakList[Character]) -> None - self._cast_list = None if not isinstance(value, weakList) else weakref.ref(value) + # type: (WeakList[Character]) -> None + self._cast_list = None if not isinstance(value, WeakList) else 
weakref.ref(value) @property def network_id(self): @@ -1900,7 +1900,7 @@ class TVShow(TVShowBase): bio=cur_row['c_bio'], ids=c_ids, image_url=cur_row['image_url'], person=[person], persons_years=p_years, show_obj=self, sid=cur_row['c_id'], thumb_url=cur_row['thumb_url'], updated=cur_row['cast_updated'])) - cast_list = weakList(c for c in old_cast or [] if c.id not in old_list) + cast_list = WeakList(c for c in old_cast or [] if c.id not in old_list) self.cast_list = cast_list return cast_list @@ -1990,7 +1990,7 @@ class TVShow(TVShowBase): return True return False - # In some situations self.status = None.. need to figure out where that is! + # In some situations self.status = None, need to figure out where that is! if not self._status: self.status = '' logger.log('Status missing for show: [%s] with status: [%s]' % @@ -2026,7 +2026,7 @@ class TVShow(TVShowBase): last_airdate = datetime.date.fromordinal(sql_result[1][0]['airdate']) \ if sql_result and sql_result[1] else datetime.date.fromordinal(1) - # if show is not 'Ended' and last episode aired less then 460 days ago + # if show is not 'Ended' and last episode aired less than 460 days ago # or don't have an airdate for the last episode always update (status 'Continuing' or '') update_days_limit = 2013 ended_limit = datetime.timedelta(days=update_days_limit) @@ -2446,7 +2446,7 @@ class TVShow(TVShowBase): logger.log('No episode number found in %s, ignoring it' % path, logger.ERROR) return None - # for now lets assume that any episode in the show dir belongs to that show + # for now let's assume that any episode in the show dir belongs to that show season_number = parse_result.season_number if None is not parse_result.season_number else 1 episode_numbers = parse_result.episode_numbers root_ep_obj = None @@ -2471,7 +2471,7 @@ class TVShow(TVShowBase): else: # if there is a new file associated with this ep then re-check the quality - status, quality = sickgear.common.Quality.splitCompositeStatus(ep_obj.status) + status, quality = sickgear.common.Quality.split_composite_status(ep_obj.status) if IGNORED == status: continue @@ -2506,25 +2506,25 @@ class TVShow(TVShowBase): # if user replaces a file, attempt to recheck the quality unless it's know to be the same file if check_quality_again and not same_file: - new_quality = Quality.nameQuality(path, self.is_anime) + new_quality = Quality.name_quality(path, self.is_anime) if Quality.UNKNOWN == new_quality: - new_quality = Quality.fileQuality(path) + new_quality = Quality.file_quality(path) logger.log('Since this file was renamed, file %s was checked and quality "%s" found' % (path, Quality.qualityStrings[new_quality]), logger.DEBUG) - status, quality = sickgear.common.Quality.splitCompositeStatus(ep_obj.status) + status, quality = sickgear.common.Quality.split_composite_status(ep_obj.status) if Quality.UNKNOWN != new_quality or status in (SKIPPED, UNAIRED): - ep_obj.status = Quality.compositeStatus(DOWNLOADED, new_quality) + ep_obj.status = Quality.composite_status(DOWNLOADED, new_quality) # check for status/quality changes as long as it's a new file elif not same_file and sickgear.helpers.has_media_ext(path)\ and ep_obj.status not in Quality.DOWNLOADED + Quality.ARCHIVED + [IGNORED]: - old_status, old_quality = Quality.splitCompositeStatus(ep_obj.status) - new_quality = Quality.nameQuality(path, self.is_anime) + old_status, old_quality = Quality.split_composite_status(ep_obj.status) + new_quality = Quality.name_quality(path, self.is_anime) if Quality.UNKNOWN == new_quality: - new_quality = 
Quality.fileQuality(path) + new_quality = Quality.file_quality(path) if Quality.UNKNOWN == new_quality: - new_quality = Quality.assumeQuality(path) + new_quality = Quality.assume_quality(path) new_status = None @@ -2536,7 +2536,7 @@ class TVShow(TVShowBase): logger.DEBUG) new_status = DOWNLOADED - # if it was snatched proper and we found a higher quality one then allow the status change + # if it was snatched proper, and we found a higher quality one then allow the status change elif SNATCHED_PROPER == old_status and old_quality < new_quality: logger.log('STATUS: this episode used to be snatched proper with quality %s but' ' a file exists with quality %s so setting the status to DOWNLOADED' @@ -2550,18 +2550,18 @@ class TVShow(TVShowBase): if None is not new_status: with ep_obj.lock: logger.log('STATUS: we have an associated file, so setting the status from %s to DOWNLOADED/%s' - % (ep_obj.status, Quality.compositeStatus(new_status, new_quality)), logger.DEBUG) - ep_obj.status = Quality.compositeStatus(new_status, new_quality) + % (ep_obj.status, Quality.composite_status(new_status, new_quality)), logger.DEBUG) + ep_obj.status = Quality.composite_status(new_status, new_quality) elif same_file: - status, quality = Quality.splitCompositeStatus(ep_obj.status) + status, quality = Quality.split_composite_status(ep_obj.status) if status in (SKIPPED, UNAIRED): - new_quality = Quality.nameQuality(path, self.is_anime) + new_quality = Quality.name_quality(path, self.is_anime) if Quality.UNKNOWN == new_quality: - new_quality = Quality.fileQuality(path) + new_quality = Quality.file_quality(path) logger.log('Since this file has status: "%s", file %s was checked and quality "%s" found' % (statusStrings[status], path, Quality.qualityStrings[new_quality]), logger.DEBUG) - ep_obj.status = Quality.compositeStatus(DOWNLOADED, new_quality) + ep_obj.status = Quality.composite_status(DOWNLOADED, new_quality) with ep_obj.lock: result = ep_obj.get_sql() @@ -2773,7 +2773,7 @@ class TVShow(TVShowBase): :param scheduled_update: :param switch: """ - # There's gotta be a better way of doing this but we don't wanna + # There's gotta be a better way of doing this, but we don't want to # change the cache value elsewhere if None is tvapi: tvinfo_config = sickgear.TVInfoAPI(self.tvid).api_params.copy() @@ -2900,7 +2900,7 @@ class TVShow(TVShowBase): cast_list = self._load_cast_from_db() remove_char_ids = {c.id for c in cast_list or []} - cast_ordered = weakList() + cast_ordered = WeakList() for ct, c_l in iteritems(show_info_cast): # type: (integer_types, List[TVInfoCharacter]) if ct not in (RoleTypes.ActorMain, RoleTypes.Host, RoleTypes.Interviewer, RoleTypes.Presenter): continue @@ -3386,11 +3386,11 @@ class TVShow(TVShowBase): # check if downloaded files still exist, update our data if this has changed if 1 != sickgear.SKIP_REMOVED_FILES: with ep_obj.lock: - # if it used to have a file associated with it and it doesn't anymore then set it to IGNORED + # if it used to have a file associated with it, and it doesn't anymore then set it to IGNORED if ep_obj.location and ep_obj.status in Quality.DOWNLOADED: if ARCHIVED == sickgear.SKIP_REMOVED_FILES: - ep_obj.status = Quality.compositeStatus( - ARCHIVED, Quality.qualityDownloaded(ep_obj.status)) + ep_obj.status = Quality.composite_status( + ARCHIVED, Quality.quality_downloaded(ep_obj.status)) else: ep_obj.status = (sickgear.SKIP_REMOVED_FILES, IGNORED)[ not sickgear.SKIP_REMOVED_FILES] @@ -3545,7 +3545,7 @@ class TVShow(TVShowBase): sickgear.FANART_RATINGS[self.tvid_prodid] 
= rating sickgear.save_config() - name_cache.buildNameCache(self) + name_cache.build_name_cache(self) self.reset_not_found_count() old_sid_int = self.create_sid(old_tvid, old_prodid) if old_sid_int != self.sid_int: @@ -3680,7 +3680,7 @@ class TVShow(TVShowBase): wq = getattr(self.sxe_ep_obj.get(season, {}).get(episode, {}), 'wanted_quality', None) if None is not wq: if quality in wq: - cur_status, cur_quality = Quality.splitCompositeStatus(self.sxe_ep_obj[season][episode].status) + cur_status, cur_quality = Quality.split_composite_status(self.sxe_ep_obj[season][episode].status) if cur_status in (WANTED, UNAIRED, SKIPPED, FAILED): logger.log('Existing episode status is wanted/unaired/skipped/failed,' ' getting found episode', logger.DEBUG) @@ -3700,7 +3700,7 @@ class TVShow(TVShowBase): pass # if the quality isn't one we want under any circumstances then just say no - initial_qualities, archive_qualities = Quality.splitQuality(self._quality) + initial_qualities, archive_qualities = Quality.split_quality(self._quality) all_qualities = list(set(initial_qualities + archive_qualities)) initial = '= (%s)' % ','.join([Quality.qualityStrings[qual] for qual in initial_qualities]) @@ -3725,7 +3725,7 @@ class TVShow(TVShowBase): logger.log('Unable to find a matching episode in database, ignoring found episode', logger.DEBUG) return False - cur_status, cur_quality = Quality.splitCompositeStatus(int(sql_result[0]['status'])) + cur_status, cur_quality = Quality.split_composite_status(int(sql_result[0]['status'])) ep_status_text = statusStrings[cur_status] logger.log('Existing episode status: %s (%s)' % (statusStrings[cur_status], ep_status_text), logger.DEBUG) @@ -4011,7 +4011,7 @@ class TVEpisode(TVEpisodeBase): return self.refresh_subtitles() - # added the if because sometime it raises an error + # added the if because sometimes it raises an error self.subtitles_searchcount = self.subtitles_searchcount + 1 if self.subtitles_searchcount else 1 self.subtitles_lastsearch = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') self.save_to_db() @@ -4292,7 +4292,7 @@ class TVEpisode(TVEpisodeBase): except (BaseTVinfoEpisodenotfound, BaseTVinfoSeasonnotfound): logger.log('Unable to find the episode on %s... has it been removed? Should I delete from db?' 
% sickgear.TVInfoAPI(self.tvid).name, logger.DEBUG) - # if I'm no longer on the Indexers but I once was then delete myself from the DB + # if I'm no longer on the Indexers, but I once was then delete myself from the DB if -1 != self._epid and helpers.should_delete_episode(self._status): self.delete_episode() elif UNKNOWN == self._status: @@ -4352,7 +4352,7 @@ class TVEpisode(TVEpisodeBase): except (ValueError, IndexError): logger.error('Malformed air date retrieved from %s (%s - %sx%s)' % (sickgear.TVInfoAPI(self.tvid).name, self.show_obj.unique_name, season, episode)) - # if I'm incomplete on TVDB but I once was complete then just delete myself from the DB for now + # if I'm incomplete on TVDB, but I once was complete then just delete myself from the DB for now if -1 != self._epid and helpers.should_delete_episode(self._status): self.delete_episode() elif UNKNOWN == self._status: @@ -4484,7 +4484,7 @@ class TVEpisode(TVEpisodeBase): # leave propers alone, you have to either post-process them or manually change them back elif self._status not in Quality.SNATCHED_ANY + Quality.DOWNLOADED + Quality.ARCHIVED: msg = '(1) Status changes from %s to ' % statusStrings[self._status] - self.status = Quality.statusFromNameOrFile(self._location, anime=self._show_obj.is_anime) + self.status = Quality.status_from_name_or_file(self._location, anime=self._show_obj.is_anime) logger.log('%s%s' % (msg, statusStrings[self._status]), logger.DEBUG) # shouldn't get here probably @@ -4513,7 +4513,7 @@ class TVEpisode(TVEpisodeBase): if '' != self.location: if UNKNOWN == self._status and sickgear.helpers.has_media_ext(self.location): - status_quality = Quality.statusFromNameOrFile(self.location, anime=self._show_obj.is_anime) + status_quality = Quality.status_from_name_or_file(self.location, anime=self._show_obj.is_anime) logger.log('(3) Status changes from %s to %s' % (self._status, status_quality), logger.DEBUG) self.status = status_quality @@ -4841,8 +4841,8 @@ class TVEpisode(TVEpisodeBase): def _ep_name(self): """ :return: the name of the episode to use during renaming. Combines the names of related episodes. - Eg. "Ep Name (1)" and "Ep Name (2)" becomes "Ep Name" - "Ep Name" and "Other Ep Name" becomes "Ep Name & Other Ep Name" + E.g. "Ep Name (1)" and "Ep Name (2)" becomes "Ep Name" + "Ep Name" and "Other Ep Name" becomes "Ep Name & Other Ep Name" :rtype: AnyStr """ @@ -4915,7 +4915,7 @@ class TVEpisode(TVEpisodeBase): return '' return parse_result.release_group - ep_status, ep_qual = Quality.splitCompositeStatus(self._status) + ep_status, ep_qual = Quality.split_composite_status(self._status) if sickgear.NAMING_STRIP_YEAR: show_name = re.sub(r'\(\d+\)$', '', self._show_obj.name).rstrip() @@ -5061,7 +5061,7 @@ class TVEpisode(TVEpisodeBase): if not ep_sep or not ep_format: continue - # start with the ep string, eg. E03 + # start with the ep string, e.g. E03 ep_string = self._format_string(ep_format.upper(), replace_map) for cur_ep_obj in self.related_ep_obj: @@ -5089,7 +5089,7 @@ class TVEpisode(TVEpisodeBase): if 3 != anime_type: absolute_number = (self._absolute_number, self._episode)[0 == self._absolute_number] - if 0 != self._season: # dont set absolute numbers if we are on specials ! + if 0 != self._season: # don't set absolute numbers if we are on specials ! if 1 == anime_type: # this crazy person wants both ! (note: +=) ep_string += sep + '%(#)03d' % {'#': absolute_number} elif 2 == anime_type: # total anime freak only need the absolute number ! 
(note: =) @@ -5272,7 +5272,7 @@ class TVEpisode(TVEpisodeBase): def airdate_modify_stamp(self): """ - Make the modify date and time of a file reflect the show air date and time. + Make modify date and time of a file reflect the show air date and time. Note: Also called from postProcessor """ diff --git a/sickgear/tv_base.py b/sickgear/tv_base.py index b8a72466..d00dc284 100644 --- a/sickgear/tv_base.py +++ b/sickgear/tv_base.py @@ -20,7 +20,7 @@ import sickgear from . import logger from ._legacy_classes import LegacyTVShow, LegacyTVEpisode from .common import UNKNOWN -from .name_cache import buildNameCache +from .name_cache import build_name_cache from six import string_types @@ -132,7 +132,7 @@ class TVShowBase(LegacyTVShow, TVBase): _current_name = self._name self.dirty_setter('_name')(self, *arg) if _current_name != self._name: - buildNameCache(self) + build_name_cache(self) # imdbid = property(lambda self: self._imdbid, dirty_setter('_imdbid')) @property diff --git a/sickgear/tvcache.py b/sickgear/tvcache.py index 16d5d967..0cb50660 100644 --- a/sickgear/tvcache.py +++ b/sickgear/tvcache.py @@ -33,6 +33,7 @@ from .tv import TVEpisode # noinspection PyUnreachableCode if False: from typing import Any, AnyStr, Dict, List, Tuple, Union + from providers.generic import GenericProvider, NZBProvider, TorrentProvider class CacheDBConnection(db.DBConnection): @@ -50,7 +51,7 @@ class CacheDBConnection(db.DBConnection): class TVCache(object): def __init__(self, provider, interval=10): - # type: (AnyStr, int) -> None + # type: (Union[GenericProvider, NZBProvider, TorrentProvider], int) -> None self.provider = provider self.providerID = self.provider.get_id() self.providerDB = None @@ -60,7 +61,7 @@ class TVCache(object): def get_db(): return CacheDBConnection() - def _clearCache(self): + def clear_cache(self): if self.should_clear_cache(): my_db = self.get_db() my_db.action('DELETE FROM provider_cache WHERE provider = ?', [self.providerID]) @@ -81,26 +82,13 @@ class TVCache(object): data = None return data - def _checkAuth(self): + def check_auth(self): # noinspection PyProtectedMember return self.provider._check_auth() - @staticmethod - def _checkItemAuth(title, url): - """ - - :param title: title - :type title: AnyStr - :param url: url - :type url: AnyStr - :return: - :rtype: bool - """ - return True - - def updateCache(self, **kwargs): + def update_cache(self, **kwargs): try: - self._checkAuth() + self.check_auth() except AuthException as e: logger.log(u'Authentication error: ' + ex(e), logger.ERROR) return [] @@ -110,13 +98,13 @@ class TVCache(object): # clear cache if data: - self._clearCache() + self.clear_cache() # parse data cl = [] for item in data or []: title, url = self._title_and_url(item) - ci = self._parseItem(title, url) + ci = self.parse_item(title, url) if None is not ci: cl.append(ci) @@ -128,13 +116,13 @@ class TVCache(object): logger.log('Warning could not save cache value [%s], caught err: %s' % (cl, ex(e))) # set updated as time the attempt to fetch data is - self.setLastUpdate() + self.set_last_update() def get_rss(self, url, **kwargs): return RSSFeeds(self.provider).get_feed(url, **kwargs) @staticmethod - def _translateTitle(title): + def _translate_title(title): """ :param title: title @@ -145,7 +133,7 @@ class TVCache(object): return u'' + title.replace(' ', '.') @staticmethod - def _translateLinkURL(url): + def _translate_link_url(url): """ :param url: url @@ -155,7 +143,7 @@ class TVCache(object): """ return url.replace('&', '&') - def _parseItem(self, title, url): + 
def parse_item(self, title, url): """ :param title: title @@ -165,18 +153,16 @@ class TVCache(object): :return: :rtype: None or List[AnyStr, List[Any]] """ - self._checkItemAuth(title, url) - if title and url: - title = self._translateTitle(title) - url = self._translateLinkURL(url) + title = self._translate_title(title) + url = self._translate_link_url(url) return self.add_cache_entry(title, url) logger.log('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name, logger.DEBUG) - def _getLastUpdate(self): + def _get_last_update(self): """ :return: @@ -186,15 +172,15 @@ class TVCache(object): sql_result = my_db.select('SELECT time FROM lastUpdate WHERE provider = ?', [self.providerID]) if sql_result: - lastTime = int(sql_result[0]['time']) - if lastTime > int(timestamp_near(datetime.datetime.now())): - lastTime = 0 + last_time = int(sql_result[0]['time']) + if last_time > int(timestamp_near(datetime.datetime.now())): + last_time = 0 else: - lastTime = 0 + last_time = 0 - return datetime.datetime.fromtimestamp(lastTime) + return datetime.datetime.fromtimestamp(last_time) - def _getLastSearch(self): + def _get_last_search(self): """ :return: @@ -204,15 +190,15 @@ class TVCache(object): sql_result = my_db.select('SELECT time FROM lastSearch WHERE provider = ?', [self.providerID]) if sql_result: - lastTime = int(sql_result[0]['time']) - if lastTime > int(timestamp_near(datetime.datetime.now())): - lastTime = 0 + last_time = int(sql_result[0]['time']) + if last_time > int(timestamp_near(datetime.datetime.now())): + last_time = 0 else: - lastTime = 0 + last_time = 0 - return datetime.datetime.fromtimestamp(lastTime) + return datetime.datetime.fromtimestamp(last_time) - def setLastUpdate(self, to_date=None): + def set_last_update(self, to_date=None): """ :param to_date: date time @@ -226,7 +212,7 @@ class TVCache(object): {'time': int(time.mktime(to_date.timetuple()))}, {'provider': self.providerID}) - def setLastSearch(self, to_date=None): + def _set_last_search(self, to_date=None): """ :param to_date: date time @@ -240,8 +226,8 @@ class TVCache(object): {'time': int(time.mktime(to_date.timetuple()))}, {'provider': self.providerID}) - lastUpdate = property(_getLastUpdate) - lastSearch = property(_getLastSearch) + last_update = property(_get_last_update) + last_search = property(_get_last_search) def should_update(self): """ @@ -250,7 +236,7 @@ class TVCache(object): :rtype: bool """ # if we've updated recently then skip the update - return datetime.datetime.now() - self.lastUpdate >= datetime.timedelta(minutes=self.update_iv) + return datetime.datetime.now() - self.last_update >= datetime.timedelta(minutes=self.update_iv) def should_clear_cache(self): """ @@ -259,7 +245,7 @@ class TVCache(object): :rtype: bool """ # if recent search hasn't used our previous results yet then don't clear the cache - return self.lastSearch >= self.lastUpdate + return self.last_search >= self.last_update def add_cache_entry(self, name, # type: AnyStr @@ -340,22 +326,22 @@ class TVCache(object): url, cur_timestamp, quality, release_group, version, parse_result.show_obj.tvid]] - def searchCache(self, - episode, # type: TVEpisode - manual_search=False # type: bool - ): # type: (...) -> List[SearchResult] + def search_cache(self, + episode, # type: TVEpisode + manual_search=False # type: bool + ): # type: (...) 
-> List[SearchResult] """ :param episode: episode object :param manual_search: manual search :return: found results or empty List """ - neededEps = self.findNeededEpisodes(episode, manual_search) - if 0 != len(neededEps): - return neededEps[episode] + needed_eps = self.find_needed_episodes(episode, manual_search) + if 0 != len(needed_eps): + return needed_eps[episode] return [] - def listPropers(self, date=None): + def list_propers(self, date=None): """ :param date: date @@ -372,14 +358,14 @@ class TVCache(object): return list(filter(lambda x: x['indexerid'] != 0, my_db.select(sql, [self.providerID]))) - def findNeededEpisodes(self, ep_obj_list, manual_search=False): + def find_needed_episodes(self, ep_obj_list, manual_search=False): # type: (Union[TVEpisode, List[TVEpisode]], bool) -> Dict[TVEpisode, SearchResult] """ :param ep_obj_list: episode object or list of episode objects :param manual_search: manual search """ - neededEps = {} + needed_eps = {} cl = [] my_db = self.get_db() @@ -402,8 +388,8 @@ class TVCache(object): sql_result = list(itertools.chain(*sql_result)) if not sql_result: - self.setLastSearch() - return neededEps + self._set_last_search() + return needed_eps # for each cache entry for cur_result in sql_result: @@ -473,12 +459,12 @@ class TVCache(object): check_is_repack=True) # add it to the list - if ep_obj not in neededEps: - neededEps[ep_obj] = [result] + if ep_obj not in needed_eps: + needed_eps[ep_obj] = [result] else: - neededEps[ep_obj].append(result) + needed_eps[ep_obj].append(result) # datetime stamp this search so cache gets cleared - self.setLastSearch() + self._set_last_search() - return neededEps + return needed_eps diff --git a/sickgear/ui.py b/sickgear/ui.py index b03d9728..522a9093 100644 --- a/sickgear/ui.py +++ b/sickgear/ui.py @@ -117,7 +117,7 @@ class Notification(object): class ProgressIndicator(object): def __init__(self, percent_complete=0, current_status=None): - self.percentComplete = percent_complete + self.percent_complete = percent_complete self.currentStatus = {'title': ''} if None is current_status else current_status @@ -128,20 +128,20 @@ class ProgressIndicators(object): } @staticmethod - def getIndicator(name): + def get_indicator(name): if name not in ProgressIndicators._pi: return [] # if any of the progress indicators are done take them off the list for curPI in ProgressIndicators._pi[name]: - if None is not curPI and 100 == curPI.percentComplete(): + if None is not curPI and 100 == curPI.percent_complete(): ProgressIndicators._pi[name].remove(curPI) # return the list of progress indicators associated with this name return ProgressIndicators._pi[name] @staticmethod - def setIndicator(name, indicator): + def set_indicator(name, indicator): ProgressIndicators._pi[name].append(indicator) @@ -154,16 +154,16 @@ class QueueProgressIndicator(object): self.queueItemList = queue_item_list self.name = name - def numTotal(self): + def num_total(self): return len(self.queueItemList) - def numFinished(self): + def num_finished(self): return len([x for x in self.queueItemList if not x.is_in_queue()]) - def numRemaining(self): + def num_remaining(self): return len([x for x in self.queueItemList if x.is_in_queue()]) - def nextName(self): + def next_name(self): for curItem in [ sickgear.show_queue_scheduler.action.currentItem] + sickgear.show_queue_scheduler.action.queue: if curItem in self.queueItemList: @@ -171,13 +171,13 @@ class QueueProgressIndicator(object): return "Unknown" - def percentComplete(self): - numFinished = self.numFinished() - 
numTotal = self.numTotal() + def percent_complete(self): + num_finished = self.num_finished() + num_total = self.num_total() - if 0 == numTotal: + if 0 == num_total: return 0 - return int(float(numFinished) / float(numTotal) * 100) + return int(float(num_finished) / float(num_total) * 100) class LoadingTVShow(object): diff --git a/sickgear/watchedstate.py b/sickgear/watchedstate.py index 0ffbf41e..14454cbc 100644 --- a/sickgear/watchedstate.py +++ b/sickgear/watchedstate.py @@ -33,6 +33,7 @@ class WatchedStateUpdater(object): return sickgear.watched_state_queue_scheduler.action.is_in_queue(self.queue_item) def run(self): + # noinspection PyUnresolvedReferences if self.is_enabled(): self.amActive = True new_item = self.queue_item() diff --git a/sickgear/watchedstate_queue.py b/sickgear/watchedstate_queue.py index 7c5d83f5..81c8d614 100644 --- a/sickgear/watchedstate_queue.py +++ b/sickgear/watchedstate_queue.py @@ -48,7 +48,7 @@ class WatchedStateQueue(generic_queue.GenericQueue): return length - def add_item(self, item): + def add_item(self, item, **kwargs): if isinstance(item, EmbyWatchedStateQueueItem) and not self.is_in_queue(EmbyWatchedStateQueueItem): # emby watched state item generic_queue.GenericQueue.add_item(self, item) diff --git a/sickgear/webapi.py b/sickgear/webapi.py index bd0e6807..40246086 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -791,7 +791,7 @@ def _mapQuality(show_obj): anyQualities = [] bestQualities = [] - iqualityID, aqualityID = Quality.splitQuality(int(show_obj)) + iqualityID, aqualityID = Quality.split_quality(int(show_obj)) if iqualityID: for quality in iqualityID: anyQualities.append(quality_map[quality]) @@ -1155,7 +1155,7 @@ class CMD_SickGearEpisode(ApiCall): timezone, episode['timezone'] = network_timezones.get_network_timezone(show_obj.network, return_name=True) episode['airdate'] = SGDatetime.sbfdate(SGDatetime.convert_to_setting( network_timezones.parse_date_time(int(episode['airdate']), show_obj.airs, timezone)), d_preset=dateFormat) - status, quality = Quality.splitCompositeStatus(int(episode["status"])) + status, quality = Quality.split_composite_status(int(episode["status"])) episode["status"] = _get_status_Strings(status) episode["quality"] = _get_quality_string(quality) episode["file_size_human"] = _sizeof_fmt(episode["file_size"]) @@ -1224,7 +1224,7 @@ class CMD_SickGearEpisodeSearch(ApiCall): # return the correct json value if ep_queue_item.success: - status, quality = Quality.splitCompositeStatus(ep_obj.status) + status, quality = Quality.split_composite_status(ep_obj.status) # TODO: split quality and status? 
return _responds(RESULT_SUCCESS, {"quality": _get_quality_string(quality)}, "Snatched (" + _get_quality_string(quality) + ")") @@ -1348,7 +1348,7 @@ class CMD_SickGearEpisodeSetStatus(ApiCall): continue if None is not self.quality: - ep_obj.status = Quality.compositeStatus(self.status, self.quality) + ep_obj.status = Quality.composite_status(self.status, self.quality) else: ep_obj.status = self.status result = ep_obj.get_sql() @@ -1667,7 +1667,7 @@ class CMD_SickGearHistory(ApiCall): results = [] np = NameParser(True, testing=True, indexer_lookup=False, try_scene_exceptions=False) for cur_result in sql_result: - status, quality = Quality.splitCompositeStatus(int(cur_result["action"])) + status, quality = Quality.split_composite_status(int(cur_result["action"])) if type_filter and status not in type_filter: continue status = _get_status_Strings(status) @@ -2164,14 +2164,14 @@ class CMD_SickGearForceSearch(ApiCall): result = None if 'recent' == self.searchtype and not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress() \ and not sickgear.recent_search_scheduler.action.amActive: - result = sickgear.recent_search_scheduler.forceRun() + result = sickgear.recent_search_scheduler.force_run() elif 'backlog' == self.searchtype and not sickgear.search_queue_scheduler.action.is_backlog_in_progress() \ and not sickgear.backlog_search_scheduler.action.amActive: sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG) result = True elif 'proper' == self.searchtype and not sickgear.search_queue_scheduler.action.is_propersearch_in_progress() \ and not sickgear.proper_finder_scheduler.action.amActive: - result = sickgear.proper_finder_scheduler.forceRun() + result = sickgear.proper_finder_scheduler.force_run() if result: return _responds(RESULT_SUCCESS, msg='%s search successfully forced' % self.searchtype) return _responds(RESULT_FAILURE, @@ -2666,7 +2666,7 @@ class CMD_SickGearSetDefaults(ApiCall): aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: - sickgear.QUALITY_DEFAULT = Quality.combineQualities(iqualityID, aqualityID) + sickgear.QUALITY_DEFAULT = Quality.combine_qualities(iqualityID, aqualityID) if self.status: # convert the string status to a int @@ -3365,7 +3365,7 @@ class CMD_SickGearShowAddExisting(ApiCall): aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: - newQuality = Quality.combineQualities(iqualityID, aqualityID) + newQuality = Quality.combine_qualities(iqualityID, aqualityID) sickgear.show_queue_scheduler.action.add_show( int(self.tvid), int(self.prodid), self.location, @@ -3471,7 +3471,7 @@ class CMD_SickGearShowAddNew(ApiCall): aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: - newQuality = Quality.combineQualities(iqualityID, aqualityID) + newQuality = Quality.combine_qualities(iqualityID, aqualityID) # use default status as a failsafe newStatus = sickgear.STATUS_DEFAULT @@ -4144,7 +4144,7 @@ class CMD_SickGearShowSeasons(ApiCall): [self.tvid, self.prodid]) seasons = {} # type: Dict[int, Dict] for cur_result in sql_result: - status, quality = Quality.splitCompositeStatus(int(cur_result["status"])) + status, quality = Quality.split_composite_status(int(cur_result["status"])) cur_result["status"] = _get_status_Strings(status) cur_result["quality"] = _get_quality_string(quality) timezone, cur_result['timezone'] = network_timezones.get_network_timezone(show_obj.network, @@ -4177,7 +4177,7 @@ class CMD_SickGearShowSeasons(ApiCall): for cur_result in sql_result: curEpisode = 
int(cur_result["episode"]) del cur_result["episode"] - status, quality = Quality.splitCompositeStatus(int(cur_result["status"])) + status, quality = Quality.split_composite_status(int(cur_result["status"])) cur_result["status"] = _get_status_Strings(status) cur_result["quality"] = _get_quality_string(quality) timezone, cur_result['timezone'] = network_timezones.get_network_timezone(show_obj.network, @@ -4262,7 +4262,7 @@ class CMD_SickGearShowSetQuality(ApiCall): aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: - newQuality = Quality.combineQualities(iqualityID, aqualityID) + newQuality = Quality.combine_qualities(iqualityID, aqualityID) show_obj.quality = newQuality show_obj.upgrade_once = self.upgradeonce @@ -4326,7 +4326,7 @@ class CMD_SickGearShowStats(ApiCall): # add all the downloaded qualities episode_qualities_counts_download = {"total": 0} for statusCode in Quality.DOWNLOADED: - status, quality = Quality.splitCompositeStatus(statusCode) + status, quality = Quality.split_composite_status(statusCode) if quality in [Quality.NONE]: continue episode_qualities_counts_download[statusCode] = 0 @@ -4334,7 +4334,7 @@ class CMD_SickGearShowStats(ApiCall): # add all snatched qualities episode_qualities_counts_snatch = {"total": 0} for statusCode in Quality.SNATCHED_ANY: - status, quality = Quality.splitCompositeStatus(statusCode) + status, quality = Quality.split_composite_status(statusCode) if quality in [Quality.NONE]: continue episode_qualities_counts_snatch[statusCode] = 0 @@ -4345,7 +4345,7 @@ class CMD_SickGearShowStats(ApiCall): [self.prodid, self.tvid]) # the main loop that goes through all episodes for cur_result in sql_result: - status, quality = Quality.splitCompositeStatus(int(cur_result["status"])) + status, quality = Quality.split_composite_status(int(cur_result["status"])) episode_status_counts_total["total"] += 1 @@ -4367,7 +4367,7 @@ class CMD_SickGearShowStats(ApiCall): if "total" == statusCode: episodes_stats["downloaded"]["total"] = episode_qualities_counts_download[statusCode] continue - status, quality = Quality.splitCompositeStatus(int(statusCode)) + status, quality = Quality.split_composite_status(int(statusCode)) statusString = Quality.qualityStrings[quality].lower().replace(" ", "_").replace("(", "").replace(")", "") episodes_stats["downloaded"][statusString] = episode_qualities_counts_download[statusCode] @@ -4378,7 +4378,7 @@ class CMD_SickGearShowStats(ApiCall): if "total" == statusCode: episodes_stats["snatched"]["total"] = episode_qualities_counts_snatch[statusCode] continue - status, quality = Quality.splitCompositeStatus(int(statusCode)) + status, quality = Quality.split_composite_status(int(statusCode)) statusString = Quality.qualityStrings[quality].lower().replace(" ", "_").replace("(", "").replace(")", "") if Quality.qualityStrings[quality] in episodes_stats["snatched"]: episodes_stats["snatched"][statusString] += episode_qualities_counts_snatch[statusCode] @@ -4390,7 +4390,7 @@ class CMD_SickGearShowStats(ApiCall): if "total" == statusCode: episodes_stats["total"] = episode_status_counts_total[statusCode] continue - status, quality = Quality.splitCompositeStatus(int(statusCode)) + status, quality = Quality.split_composite_status(int(statusCode)) statusString = statusStrings.statusStrings[statusCode].lower().replace(" ", "_").replace("(", "").replace( ")", "") episodes_stats[statusString] = episode_status_counts_total[statusCode] @@ -4653,7 +4653,7 @@ class CMD_SickGearShowsForceUpdate(ApiCall): or 
sickgear.show_update_scheduler.action.amActive: return _responds(RESULT_FAILURE, msg="show update already running.") - result = sickgear.show_update_scheduler.forceRun() + result = sickgear.show_update_scheduler.force_run() if result: return _responds(RESULT_SUCCESS, msg="daily show update started") return _responds(RESULT_FAILURE, msg="can't start show update currently") diff --git a/sickgear/webserve.py b/sickgear/webserve.py index 18f14bd9..6ccdec7f 100644 --- a/sickgear/webserve.py +++ b/sickgear/webserve.py @@ -19,6 +19,7 @@ from __future__ import with_statement, division # noinspection PyProtectedMember from mimetypes import MimeTypes +from urllib.parse import urljoin import base64 import copy @@ -41,13 +42,21 @@ from json_helper import json_dumps, json_loads import sg_helpers from sg_helpers import remove_file, scantree, is_virtualenv +from sg_futures import SgThreadPoolExecutor +try: + from multiprocessing import cpu_count +except ImportError: + # some platforms don't have multiprocessing + def cpu_count(): + return None + import sickgear from . import classes, clients, config, db, helpers, history, image_cache, logger, name_cache, naming, \ network_timezones, notifiers, nzbget, processTV, sab, scene_exceptions, search_queue, subtitles, ui from .anime import AniGroupList, pull_anidb_groups, short_group_names from .browser import folders_at_path from .common import ARCHIVED, DOWNLOADED, FAILED, IGNORED, SKIPPED, SNATCHED, SNATCHED_ANY, UNAIRED, UNKNOWN, WANTED, \ - SD, HD720p, HD1080p, UHD2160p, Overview, Quality, qualityPresetStrings, statusStrings + SD, HD720p, HD1080p, UHD2160p, Overview, Quality, qualityPresetStrings, statusStrings from .helpers import get_media_stats, has_image_ext, real_path, remove_article, remove_file_perm, starify from .indexermapper import MapStatus, map_indexers_to_show, save_mapping from .indexers.indexer_config import TVINFO_IMDB, TVINFO_TMDB, TVINFO_TRAKT, TVINFO_TVDB, TVINFO_TVMAZE, \ @@ -72,13 +81,9 @@ from unidecode import unidecode import dateutil.parser from tornado import gen, iostream -# noinspection PyUnresolvedReferences +from tornado.escape import utf8 from tornado.web import RequestHandler, StaticFileHandler, authenticated from tornado.concurrent import run_on_executor -# tornado.web.RequestHandler above is unresolved until... -# 1) RouteHandler derives from RequestHandler instead of LegacyBaseHandler -# 2) the following line is removed (plus the noinspection deleted) -from ._legacy import LegacyBaseHandler from lib import subliminal from lib.cfscrape import CloudflareScraper @@ -98,6 +103,7 @@ from six import binary_type, integer_types, iteritems, iterkeys, itervalues, mov if False: from typing import Any, AnyStr, Dict, List, Optional, Set, Tuple from sickgear.providers.generic import TorrentProvider + from tv import TVInfoShow # noinspection PyAbstractClass @@ -187,7 +193,50 @@ class BaseStaticFileHandler(StaticFileHandler): self.set_header('X-Frame-Options', 'SAMEORIGIN') -class RouteHandler(LegacyBaseHandler): +class RouteHandler(RequestHandler): + + executor = SgThreadPoolExecutor(thread_name_prefix='WEBSERVER', max_workers=min(32, (cpu_count() or 1) + 4)) + + def redirect(self, url, permanent=False, status=None): + """Send a redirect to the given (optionally relative) URL. + + ----->>>>> NOTE: Removed self.finish <<<<<----- + + If the ``status`` argument is specified, that value is used as the + HTTP status code; otherwise either 301 (permanent) or 302 + (temporary) is chosen based on the ``permanent`` argument. 
+ The default is 302 (temporary). + """ + if not url.startswith(sickgear.WEB_ROOT): + url = sickgear.WEB_ROOT + url + + # noinspection PyUnresolvedReferences + if self._headers_written: + raise Exception('Cannot redirect after headers have been written') + if status is None: + status = 301 if permanent else 302 + else: + assert isinstance(status, int) + assert 300 <= status <= 399 + self.set_status(status) + self.set_header('Location', urljoin(utf8(self.request.uri), utf8(url))) + + def write_error(self, status_code, **kwargs): + body = '' + try: + if self.request.body: + body = '\nRequest body: %s' % decode_str(self.request.body) + except (BaseException, Exception): + pass + logger.log('Sent %s error response to a `%s` request for `%s` with headers:\n%s%s' % + (status_code, self.request.method, self.request.path, self.request.headers, body), logger.WARNING) + # suppress traceback by removing 'exc_info' kwarg + if 'exc_info' in kwargs: + logger.log('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"]), + logger.DEBUG) + del kwargs['exc_info'] + return super(RouteHandler, self).write_error(status_code, **kwargs) + def data_received(self, *args): pass @@ -307,7 +356,7 @@ class BaseHandler(RouteHandler): elif 'fanart' == which[0:6]: image_file_name = [cache_obj.fanart_path( *tvid_prodid_obj.tuple + - ('%s' % (re.sub(r'.*?fanart_(\d+(?:\.\w{1,20})?\.\w{5,8}).*', r'\1.', which, 0, re.I)),))] + ('%s' % (re.sub(r'.*?fanart_(\d+(?:\.\w{1,20})?\.\w{5,8}).*', r'\1.', which, 0, re.I)),))] for cur_name in image_file_name: if os.path.isfile(cur_name): @@ -618,7 +667,7 @@ class RepoHandler(BaseStaticFileHandler): return self.index([('resource.language.en_gb/', 'English/')[self.kodi_is_legacy]]) def render_kodi_service_sickgear_watchedstate_updater_resources_language_english_index(self): - return self.index([('strings.po', 'strings.xml')[self.kodi_is_legacy]]) + return self.index([('strings.po', 'strings.xml')[self.kodi_is_legacy]]) def repo_sickgear_details(self): return re.findall(r'(?si)addon\sid="(repository\.[^"]+)[^>]+version="([^"]+)', @@ -875,9 +924,10 @@ class LogfileHandler(BaseHandler): super(LogfileHandler, self).__init__(application, request, **kwargs) self.lock = threading.Lock() + # noinspection PyUnusedLocal @authenticated @gen.coroutine - def get(self, path, *args, **kwargs): + def get(self, *args, **kwargs): logfile_name = logger.current_log_file() try: @@ -1127,7 +1177,7 @@ class MainHandler(WebHandler): # make a dict out of the sql results sql_result = [dict(row) for row in sql_result - if Quality.splitCompositeStatus(helpers.try_int(row['status']))[0] not in + if Quality.split_composite_status(helpers.try_int(row['status']))[0] not in SNATCHED_ANY + [DOWNLOADED, ARCHIVED, IGNORED, SKIPPED]] # multi dimension sort @@ -1178,8 +1228,8 @@ class MainHandler(WebHandler): pass if imdb_id: sql_result[index]['imdb_url'] = sickgear.indexers.indexer_config.tvinfo_config[ - sickgear.indexers.indexer_config.TVINFO_IMDB][ - 'show_url'] % imdb_id + sickgear.indexers.indexer_config.TVINFO_IMDB][ + 'show_url'] % imdb_id else: sql_result[index]['imdb_url'] = '' @@ -1282,7 +1332,7 @@ class MainHandler(WebHandler): now = datetime.datetime.now() events = [ - ('recent', sickgear.recent_search_scheduler.timeLeft), + ('recent', sickgear.recent_search_scheduler.time_left), ('backlog', sickgear.backlog_search_scheduler.next_backlog_timeleft), ] @@ -1996,7 +2046,7 @@ class Home(MainHandler): if not line.strip(): continue if line.startswith(' '): - change_parts = 
re.findall(r'^[\W]+(.*)$', line) + change_parts = re.findall(r'^\W+(.*)$', line) change['text'] += change_parts and (' %s' % change_parts[0].strip()) or '' else: if change: @@ -2008,11 +2058,11 @@ class Home(MainHandler): elif not max_rel: break elif line.startswith('### '): - rel_data = re.findall(r'(?im)^###\W*([^\s]+)\W\(([^)]+)\)', line) + rel_data = re.findall(r'(?im)^###\W*(\S+)\W\(([^)]+)\)', line) rel_data and output.append({'type': 'rel', 'ver': rel_data[0][0], 'date': rel_data[0][1]}) max_rel -= 1 elif line.startswith('# '): - max_data = re.findall(r'^#\W*([\d]+)\W*$', line) + max_data = re.findall(r'^#\W*(\d+)\W*$', line) max_rel = max_data and helpers.try_int(max_data[0], None) or 5 if change: output.append(change) @@ -2071,6 +2121,7 @@ class Home(MainHandler): else: self.redirect('/home/') + # noinspection PyUnusedLocal def season_render(self, tvid_prodid=None, season=None, **kwargs): response = {'success': False} @@ -2309,7 +2360,7 @@ class Home(MainHandler): status_overview = show_obj.get_overview(row['status']) if status_overview: ep_counts[status_overview] += row['cnt'] - if ARCHIVED == Quality.splitCompositeStatus(row['status'])[0]: + if ARCHIVED == Quality.split_composite_status(row['status'])[0]: ep_counts['archived'].setdefault(row['season'], 0) ep_counts['archived'][row['season']] = row['cnt'] + ep_counts['archived'].get(row['season'], 0) else: @@ -2376,7 +2427,7 @@ class Home(MainHandler): t.clean_show_name = quote_plus(sickgear.indexermapper.clean_show_name(show_obj.name)) - t.min_initial = Quality.get_quality_ui(min(Quality.splitQuality(show_obj.quality)[0])) + t.min_initial = Quality.get_quality_ui(min(Quality.split_quality(show_obj.quality)[0])) t.show_obj.exceptions = scene_exceptions.get_scene_exceptions(show_obj.tvid, show_obj.prodid) # noinspection PyUnresolvedReferences t.all_scene_exceptions = show_obj.exceptions # normally Unresolved as not a class attribute, force set above @@ -2422,7 +2473,7 @@ class Home(MainHandler): sorted_show_list[i].unique_name = '%s (%s)' % (sorted_show_list[i].name, start_year) dups[sorted_show_list[i].unique_name] = i - name_cache.buildNameCache() + name_cache.build_name_cache() @staticmethod def sorted_show_lists(): @@ -2577,12 +2628,12 @@ class Home(MainHandler): for k, v in iteritems(new_ids): if None is v.get('id') or None is v.get('status'): continue - if (show_obj.ids.get(k, {'id': 0}).get('id') != v.get('id') or - (MapStatus.NO_AUTOMATIC_CHANGE == v.get('status') and - MapStatus.NO_AUTOMATIC_CHANGE != show_obj.ids.get( - k, {'status': MapStatus.NONE}).get('status')) or - (MapStatus.NO_AUTOMATIC_CHANGE != v.get('status') and - MapStatus.NO_AUTOMATIC_CHANGE == show_obj.ids.get( + if (show_obj.ids.get(k, {'id': 0}).get('id') != v.get('id') + or (MapStatus.NO_AUTOMATIC_CHANGE == v.get('status') + and MapStatus.NO_AUTOMATIC_CHANGE != show_obj.ids.get( + k, {'status': MapStatus.NONE}).get('status')) + or (MapStatus.NO_AUTOMATIC_CHANGE != v.get('status') + and MapStatus.NO_AUTOMATIC_CHANGE == show_obj.ids.get( k, {'status': MapStatus.NONE}).get('status'))): show_obj.ids[k]['id'] = (0, v['id'])[v['id'] >= 0] show_obj.ids[k]['status'] = (MapStatus.NOT_FOUND, v['status'])[v['id'] != 0] @@ -2837,7 +2888,7 @@ class Home(MainHandler): errors = [] with show_obj.lock: - show_obj.quality = Quality.combineQualities(list(map(int, any_qualities)), list(map(int, best_qualities))) + show_obj.quality = Quality.combine_qualities(list(map(int, any_qualities)), list(map(int, best_qualities))) show_obj.upgrade_once = upgrade_once # reversed for 
now @@ -3032,6 +3083,7 @@ class Home(MainHandler): self.redirect('/home/view-show?tvid_prodid=%s' % show_obj.tvid_prodid) + # noinspection PyUnusedLocal def subtitle_show(self, tvid_prodid=None, force=0): if None is tvid_prodid: @@ -3050,6 +3102,7 @@ class Home(MainHandler): self.redirect('/home/view-show?tvid_prodid=%s' % show_obj.tvid_prodid) + # noinspection PyUnusedLocal def update_mb(self, tvid_prodid=None, **kwargs): if notifiers.NotifierFactory().get('EMBY').update_library( @@ -3115,7 +3168,7 @@ class Home(MainHandler): return json_dumps({'result': 'error'}) return self._generic_message('Error', err_msg) - min_initial = min(Quality.splitQuality(show_obj.quality)[0]) + min_initial = min(Quality.split_quality(show_obj.quality)[0]) segments = {} if None is not eps: @@ -3157,12 +3210,12 @@ class Home(MainHandler): if ARCHIVED == status: if ep_obj.status in Quality.DOWNLOADED or direct: - ep_obj.status = Quality.compositeStatus( - ARCHIVED, (Quality.splitCompositeStatus(ep_obj.status)[1], min_initial)[use_default]) + ep_obj.status = Quality.composite_status( + ARCHIVED, (Quality.split_composite_status(ep_obj.status)[1], min_initial)[use_default]) elif DOWNLOADED == status: if ep_obj.status in Quality.ARCHIVED: - ep_obj.status = Quality.compositeStatus( - DOWNLOADED, Quality.splitCompositeStatus(ep_obj.status)[1]) + ep_obj.status = Quality.composite_status( + DOWNLOADED, Quality.split_composite_status(ep_obj.status)[1]) else: ep_obj.status = status @@ -3248,12 +3301,12 @@ class Home(MainHandler): for _cur_ep_obj in cur_ep_obj.related_ep_obj + [cur_ep_obj]: if _cur_ep_obj in ep_obj_rename_list: break - ep_status, ep_qual = Quality.splitCompositeStatus(_cur_ep_obj.status) + ep_status, ep_qual = Quality.split_composite_status(_cur_ep_obj.status) if not ep_qual: continue ep_obj_rename_list.append(cur_ep_obj) else: - ep_status, ep_qual = Quality.splitCompositeStatus(cur_ep_obj.status) + ep_status, ep_qual = Quality.split_composite_status(cur_ep_obj.status) if not ep_qual: continue ep_obj_rename_list.append(cur_ep_obj) @@ -3330,7 +3383,7 @@ class Home(MainHandler): # retrieve the episode object and fail if we can't get one ep_obj = self._get_episode(tvid_prodid, season, episode) if not isinstance(ep_obj, str): - if UNKNOWN == Quality.splitCompositeStatus(ep_obj.status)[0]: + if UNKNOWN == Quality.split_composite_status(ep_obj.status)[0]: ep_obj.status = SKIPPED # make a queue item for the TVEpisode and put it on the queue @@ -3400,7 +3453,7 @@ class Home(MainHandler): seen_eps.add(uniq_sxe) for snatched in filter(lambda s: ((s.tvid, s.prodid, s.season, s.episode) not in seen_eps), - item.snatched_eps): + item.snatched_eps): ep_obj = getattr(snatched, 'ep_obj', None) if not ep_obj: continue @@ -3435,9 +3488,9 @@ class Home(MainHandler): """ # Find the quality class for the episode quality_class = Quality.qualityStrings[Quality.UNKNOWN] - ep_status, ep_quality = Quality.splitCompositeStatus(ep_type.status) + ep_status, ep_quality = Quality.split_composite_status(ep_type.status) for x in (SD, HD720p, HD1080p, UHD2160p): - if ep_quality in Quality.splitQuality(x)[0]: + if ep_quality in Quality.split_quality(x)[0]: quality_class = qualityPresetStrings[x] break @@ -3466,7 +3519,7 @@ class Home(MainHandler): if isinstance(ep_obj, str): return json_dumps({'result': 'failure'}) - # try do download subtitles for that episode + # try to download subtitles for that episode try: previous_subtitles = set([subliminal.language.Language(x) for x in ep_obj.subtitles]) ep_obj.subtitles = set([x.language for x in 
next(itervalues(ep_obj.download_subtitles()))]) @@ -3880,7 +3933,7 @@ class HomeProcessMedia(Home): regexp = re.compile(r'(?i)', flags=re.UNICODE) result = regexp.sub('\n', result) if None is not quiet and 1 == int(quiet): - regexp = re.compile(u'(?i)<a[^>]+>([^<]+)<[/]a>', flags=re.UNICODE) + regexp = re.compile(u'(?i)<a[^>]+>([^<]+)</a>', flags=re.UNICODE) return u'%s' % regexp.sub(r'\1', result) return self._generic_message('Postprocessing results', u'
%s
' % result) @@ -3985,7 +4038,7 @@ class AddShows(Home): r'(?P[^ ]+themoviedb\.org/tv/(?P\d+)[^ ]*)|' r'(?P[^ ]+trakt\.tv/shows/(?P[^ /]+)[^ ]*)|' r'(?P[^ ]+thetvdb\.com/series/(?P[^ /]+)[^ ]*)|' - r'(?P[^ ]+thetvdb\.com/[^\d]+(?P[^ /]+)[^ ]*)|' + r'(?P[^ ]+thetvdb\.com/\D+(?P[^ /]+)[^ ]*)|' r'(?P[^ ]+tvmaze\.com/shows/(?P\d+)/?[^ ]*)', search_term) if id_check: for cur_match in id_check: @@ -4035,7 +4088,7 @@ class AddShows(Home): t = sickgear.TVInfoAPI(cur_tvid).setup(**tvinfo_config) results.setdefault(cur_tvid, {}) try: - for cur_result in t.search_show(list(used_search_term), ids=ids_search_used): + for cur_result in t.search_show(list(used_search_term), ids=ids_search_used): # type: TVInfoShow if TVINFO_TRAKT == cur_tvid and not cur_result['ids'].tvdb: continue tv_src_id = int(cur_result['id']) @@ -4679,7 +4732,7 @@ class AddShows(Home): def parse_imdb_html(self, html, filtered, kwargs): - img_size = re.compile(r'(?im)(V1[^XY]+([XY]))(\d+)([^\d]+)(\d+)([^\d]+)(\d+)([^\d]+)(\d+)([^\d]+)(\d+)(.*?)$') + img_size = re.compile(r'(?im)(V1[^XY]+([XY]))(\d+)(\D+)(\d+)(\D+)(\d+)(\D+)(\d+)(\D+)(\d+)(.*?)$') with BS4Parser(html, features=['html5lib', 'permissive']) as soup: show_list = soup.select('.lister-list') @@ -5200,7 +5253,7 @@ class AddShows(Home): channel_tag_copy = copy.copy(channel_tag) if channel_tag_copy: network = channel_tag_copy.a.extract().get_text(strip=True) - date_info = re.sub(r'^[^\d]+', '', channel_tag_copy.get_text(strip=True)) + date_info = re.sub(r'^\D+', '', channel_tag_copy.get_text(strip=True)) if date_info: dt = dateutil.parser.parse((date_info, '%s.01.01' % date_info)[4 == len(date_info)]) @@ -5209,7 +5262,7 @@ class AddShows(Home): and 'printed' in ' '.join(t.get('class', ''))] if len(tag): age_args = {} - future = re.sub(r'[^\d]+(.*)', r'\1', tag[0].get_text(strip=True)) + future = re.sub(r'\D+(.*)', r'\1', tag[0].get_text(strip=True)) for (dim, rcx) in rc: value = helpers.try_int(rcx.sub(r'\1', future), None) if value: @@ -5237,7 +5290,7 @@ class AddShows(Home): genres = row.find(class_='genre') if genres: - genres = re.sub(r',([^\s])', r', \1', genres.get_text(strip=True)) + genres = re.sub(r',(\S)', r', \1', genres.get_text(strip=True)) overview = row.find(class_='summary') if overview: overview = overview.get_text(strip=True) @@ -6031,7 +6084,7 @@ class AddShows(Home): any_qualities = [any_qualities] if type(best_qualities) != list: best_qualities = [best_qualities] - new_quality = Quality.combineQualities(list(map(int, any_qualities)), list(map(int, best_qualities))) + new_quality = Quality.combine_qualities(list(map(int, any_qualities)), list(map(int, best_qualities))) upgrade_once = config.checkbox_to_value(upgrade_once) wanted_begin = config.minimax(wanted_begin, 0, -1, 10) @@ -6226,7 +6279,7 @@ class Manage(MainHandler): if cur_season not in result: result[cur_season] = {} - cur_quality = Quality.splitCompositeStatus(int(cur_result['status']))[1] + cur_quality = Quality.split_composite_status(int(cur_result['status']))[1] result[cur_season][cur_episode] = {'name': cur_result['name'], 'airdateNever': 1000 > int(cur_result['airdate']), 'qualityCss': Quality.get_quality_css(cur_quality), @@ -6246,9 +6299,9 @@ class Manage(MainHandler): if event_sql_result: for cur_result_event in event_sql_result: if None is d_status and cur_result_event['action'] in Quality.DOWNLOADED: - d_status, d_qual = Quality.splitCompositeStatus(cur_result_event['action']) + d_status, d_qual = Quality.split_composite_status(cur_result_event['action']) if None is s_status and 
cur_result_event['action'] in Quality.SNATCHED_ANY: - s_status, s_quality = Quality.splitCompositeStatus(cur_result_event['action']) + s_status, s_quality = Quality.split_composite_status(cur_result_event['action']) aged = ((datetime.datetime.now() - datetime.datetime.strptime(str(cur_result_event['date']), sickgear.history.dateFormat)) @@ -6289,11 +6342,11 @@ class Manage(MainHandler): if Quality.NONE == cur_quality: return undo_from_history, change_to, status - cur_status = Quality.splitCompositeStatus(int(cur_status))[0] + cur_status = Quality.split_composite_status(int(cur_status))[0] if any([location]): undo_from_history = True change_to = statusStrings[DOWNLOADED] - status = [Quality.compositeStatus(DOWNLOADED, d_qual or cur_quality)] + status = [Quality.composite_status(DOWNLOADED, d_qual or cur_quality)] elif cur_status in Quality.SNATCHED_ANY + [IGNORED, SKIPPED, WANTED]: if None is d_qual: if cur_status not in [IGNORED, SKIPPED]: @@ -6305,7 +6358,7 @@ class Manage(MainHandler): or sickgear.SKIP_REMOVED_FILES in [ARCHIVED, IGNORED, SKIPPED]: undo_from_history = True change_to = '%s %s' % (statusStrings[ARCHIVED], Quality.qualityStrings[d_qual]) - status = [Quality.compositeStatus(ARCHIVED, d_qual)] + status = [Quality.composite_status(ARCHIVED, d_qual)] elif sickgear.SKIP_REMOVED_FILES in [IGNORED, SKIPPED] \ and cur_status not in [IGNORED, SKIPPED]: change_to = statusStrings[statusStrings[sickgear.SKIP_REMOVED_FILES]] @@ -6893,7 +6946,7 @@ class Manage(MainHandler): new_subtitles = 'on' if new_subtitles else 'off' if 'keep' == quality_preset: - any_qualities, best_qualities = Quality.splitQuality(show_obj.quality) + any_qualities, best_qualities = Quality.split_quality(show_obj.quality) elif int(quality_preset): best_qualities = [] @@ -7110,7 +7163,7 @@ class ManageSearch(Manage): def retry_provider(provider=None): if not provider: return - prov = [p for p in sickgear.providerList + sickgear.newznabProviderList if p.get_id() == provider] + prov = [p for p in sickgear.provider_list + sickgear.newznab_providers if p.get_id() == provider] if not prov: return prov[0].retry_next() @@ -7131,7 +7184,7 @@ class ManageSearch(Manage): # force it to run the next time it looks if not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress(): - result = sickgear.recent_search_scheduler.forceRun() + result = sickgear.recent_search_scheduler.force_run() if result: logger.log(u'Recent search forced') ui.notifications.message('Recent search started') @@ -7142,7 +7195,7 @@ class ManageSearch(Manage): def force_find_propers(self): # force it to run the next time it looks - result = sickgear.proper_finder_scheduler.forceRun() + result = sickgear.proper_finder_scheduler.force_run() if result: logger.log(u'Find propers search forced') ui.notifications.message('Find propers search started') @@ -7166,7 +7219,7 @@ class ShowTasks(Manage): t = PageTemplate(web_handler=self, file='manage_showProcesses.tmpl') t.queue_length = sickgear.show_queue_scheduler.action.queue_length() t.people_queue = sickgear.people_queue_scheduler.action.queue_data() - t.next_run = sickgear.show_update_scheduler.lastRun.replace( + t.next_run = sickgear.show_update_scheduler.last_run.replace( hour=sickgear.show_update_scheduler.start_time.hour) t.show_update_running = sickgear.show_queue_scheduler.action.is_show_update_running() \ or sickgear.show_update_scheduler.action.amActive @@ -7252,7 +7305,7 @@ class ShowTasks(Manage): def force_show_update(self): - result = sickgear.show_update_scheduler.forceRun() + result = 
sickgear.show_update_scheduler.force_run() if result: logger.log(u'Show Update forced') ui.notifications.message('Forced Show Update started') @@ -7412,7 +7465,7 @@ class History(MainHandler): r['status'] = r['status_w'] r['file_size'] = r['file_size_w'] - r['status'], r['quality'] = Quality.splitCompositeStatus(helpers.try_int(r['status'])) + r['status'], r['quality'] = Quality.split_composite_status(helpers.try_int(r['status'])) r['season'], r['episode'] = '%02i' % r['season'], '%02i' % r['episode'] if r['tvep_id'] not in mru_count: # depends on SELECT ORDER BY date_watched DESC to determine mru_count @@ -7428,9 +7481,9 @@ class History(MainHandler): elif 'stats' in sickgear.HISTORY_LAYOUT: - prov_list = [p.name for p in (sickgear.providerList - + sickgear.newznabProviderList - + sickgear.torrentRssProviderList)] + prov_list = [p.name for p in (sickgear.provider_list + + sickgear.newznab_providers + + sickgear.torrent_rss_providers)] # noinspection SqlResolve sql = 'SELECT COUNT(1) AS count,' \ ' MIN(DISTINCT date) AS earliest,' \ @@ -7462,7 +7515,7 @@ class History(MainHandler): prov_id=p.get_id(), # 2020.03.17 legacy var, remove at future date fails=p.fails.fails_sorted, next_try=p.get_next_try_time, has_limit=getattr(p, 'has_limit', False), tmr_limit_time=p.tmr_limit_time) - for p in sickgear.providerList + sickgear.newznabProviderList])) + for p in sickgear.provider_list + sickgear.newznab_providers])) t.provider_fail_cnt = len([p for p in t.provider_fail_stats if len(p['fails'])]) t.provider_fails = t.provider_fail_cnt # 2020.03.17 legacy var, remove at future date @@ -7841,7 +7894,7 @@ class History(MainHandler): show_obj = helpers.find_show_by_id(tvid_prodid_dict) ep_obj = show_obj.get_episode(cur_result['season'], cur_result['episode']) for n in filter(lambda x: x.name.lower() in ('emby', 'kodi', 'plex'), - notifiers.NotifierFactory().get_enabled()): + notifiers.NotifierFactory().get_enabled()): if 'PLEX' == n.name: if updating: continue @@ -8011,7 +8064,7 @@ class ConfigGeneral(Config): return json_dumps(dict(text='%s\n\n' % ui_output)) @staticmethod - def generate_key(): + def generate_key(*args, **kwargs): """ Return a new randomized API_KEY """ # Create some values to seed md5 @@ -8019,8 +8072,10 @@ class ConfigGeneral(Config): result = hashlib.new('md5', decode_bytes(seed)).hexdigest() - # Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b - logger.log(u'New API generated') + # Return a hex digest of the md5, e.g. 
49f68a5c8493ec2c0bf489821c21fc3b + app_name = kwargs.get('app_name') + app_name = '' if not app_name else ' for [%s]' % app_name + logger.log(u'New API generated%s' % app_name) return result @@ -8053,8 +8108,8 @@ class ConfigGeneral(Config): any_qualities = ([], any_qualities.split(','))[any(any_qualities)] best_qualities = ([], best_qualities.split(','))[any(best_qualities)] - sickgear.QUALITY_DEFAULT = int(Quality.combineQualities(list(map(int, any_qualities)), - list(map(int, best_qualities)))) + sickgear.QUALITY_DEFAULT = int(Quality.combine_qualities(list(map(int, any_qualities)), + list(map(int, best_qualities)))) sickgear.WANTED_BEGIN_DEFAULT = config.minimax(default_wanted_begin, 0, -1, 10) sickgear.WANTED_LATEST_DEFAULT = config.minimax(default_wanted_latest, 0, -1, 10) sickgear.SHOW_TAG_DEFAULT = default_tag @@ -8067,33 +8122,6 @@ class ConfigGeneral(Config): sickgear.save_config() - @staticmethod - def generateKey(*args, **kwargs): - """ Return a new randomized API_KEY - """ - - try: - from hashlib import md5 - except ImportError: - # noinspection PyUnresolvedReferences,PyCompatibility - from md5 import md5 - - # Create some values to seed md5 - t = str(time.time()) - r = str(random.random()) - - # Create the md5 instance and give it the current time - m = md5(decode_bytes(t)) - - # Update the md5 instance with the random variable - m.update(decode_bytes(r)) - - # Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b - app_name = kwargs.get('app_name') - app_name = '' if not app_name else ' for [%s]' % app_name - logger.log(u'New apikey generated%s' % app_name) - return m.hexdigest() - def create_apikey(self, app_name): result = dict() if not app_name: @@ -8101,7 +8129,7 @@ class ConfigGeneral(Config): elif app_name in [k[0] for k in sickgear.API_KEYS if k[0]]: result['result'] = 'Failed: name is not unique' else: - api_key = self.generateKey(app_name=app_name) + api_key = self.generate_key(app_name=app_name) if api_key in [k[1] for k in sickgear.API_KEYS if k[0]]: result['result'] = 'Failed: apikey already exists, try again' else: @@ -8199,7 +8227,7 @@ class ConfigGeneral(Config): sickgear.FANART_LIMIT = config.minimax(fanart_limit, 3, 0, 500) sickgear.SHOWLIST_TAGVIEW = showlist_tagview - # 'Show List' is the must have default fallback. Tags in use that are removed from config ui are restored, + # 'Show List' is the must-have default fallback. Tags in use that are removed from config ui are restored, # not deleted. Deduped list order preservation is key to feature function. 
my_db = db.DBConnection() sql_result = my_db.select('SELECT DISTINCT tag FROM tv_shows') @@ -8211,7 +8239,7 @@ class ConfigGeneral(Config): results += [u'An attempt was prevented to remove a show list group name still in use'] dedupe = {} sickgear.SHOW_TAGS = [dedupe.setdefault(item, item) for item in (cleanser + new_names + [u'Show List']) - if item not in dedupe] + if item not in dedupe] sickgear.HOME_SEARCH_FOCUS = config.checkbox_to_value(home_search_focus) sickgear.USE_IMDB_INFO = config.checkbox_to_value(use_imdb_info) @@ -8255,7 +8283,7 @@ class ConfigGeneral(Config): sickgear.HANDLE_REVERSE_PROXY = config.checkbox_to_value(handle_reverse_proxy) sickgear.SEND_SECURITY_HEADERS = config.checkbox_to_value(send_security_headers) hosts = ','.join(filter(lambda name: not helpers.re_valid_hostname(with_allowed=False).match(name), - config.clean_hosts(allowed_hosts).split(','))) + config.clean_hosts(allowed_hosts).split(','))) if not hosts or self.request.host_name in hosts: sickgear.ALLOWED_HOSTS = hosts sickgear.ALLOW_ANYIP = config.checkbox_to_value(allow_anyip) @@ -8395,9 +8423,9 @@ class ConfigSearch(Config): sickgear.USENET_RETENTION = config.to_int(usenet_retention, default=500) sickgear.IGNORE_WORDS, sickgear.IGNORE_WORDS_REGEX = helpers.split_word_str(ignore_words - if ignore_words else '') + if ignore_words else '') sickgear.REQUIRE_WORDS, sickgear.REQUIRE_WORDS_REGEX = helpers.split_word_str(require_words - if require_words else '') + if require_words else '') clean_ignore_require_words() @@ -8406,7 +8434,7 @@ class ConfigSearch(Config): sickgear.SEARCH_UNAIRED = bool(config.checkbox_to_value(search_unaired)) sickgear.UNAIRED_RECENT_SEARCH_ONLY = bool(config.checkbox_to_value(unaired_recent_search_only, - value_off=1, value_on=0)) + value_off=1, value_on=0)) sickgear.FLARESOLVERR_HOST = config.clean_url(flaresolverr_host) sg_helpers.FLARESOLVERR_HOST = sickgear.FLARESOLVERR_HOST @@ -8668,9 +8696,9 @@ class ConfigProviders(Config): return json_dumps({'error': 'No Provider Name or url specified'}) provider_dict = dict(zip([sickgear.providers.generic_provider_name(x.get_id()) - for x in sickgear.newznabProviderList], sickgear.newznabProviderList)) + for x in sickgear.newznab_providers], sickgear.newznab_providers)) provider_url_dict = dict(zip([sickgear.providers.generic_provider_url(x.url) - for x in sickgear.newznabProviderList], sickgear.newznabProviderList)) + for x in sickgear.newznab_providers], sickgear.newznab_providers)) temp_provider = newznab.NewznabProvider(name, config.clean_url(url)) @@ -8694,12 +8722,12 @@ class ConfigProviders(Config): error = '\nNo provider %s specified' % error return json_dumps({'success': False, 'error': error}) - if name in [n.name for n in sickgear.newznabProviderList if n.url == url]: - provider = [n for n in sickgear.newznabProviderList if n.name == name][0] + if name in [n.name for n in sickgear.newznab_providers if n.url == url]: + provider = [n for n in sickgear.newznab_providers if n.name == name][0] tv_categories = provider.clean_newznab_categories(provider.all_cats) state = provider.is_enabled() else: - providers = dict(zip([x.get_id() for x in sickgear.newznabProviderList], sickgear.newznabProviderList)) + providers = dict(zip([x.get_id() for x in sickgear.newznab_providers], sickgear.newznab_providers)) temp_provider = newznab.NewznabProvider(name, url, key) if None is not key and starify(key, True): temp_provider.key = providers[temp_provider.get_id()].key @@ -8715,7 +8743,7 @@ class ConfigProviders(Config): return 
json_dumps({'error': 'Invalid name specified'}) provider_dict = dict( - zip([x.get_id() for x in sickgear.torrentRssProviderList], sickgear.torrentRssProviderList)) + zip([x.get_id() for x in sickgear.torrent_rss_providers], sickgear.torrent_rss_providers)) temp_provider = rsstorrent.TorrentRssProvider(name, url, cookies) @@ -8730,7 +8758,7 @@ class ConfigProviders(Config): @staticmethod def check_providers_ping(): - for p in sickgear.providers.sortedProviderList(): + for p in sickgear.providers.sorted_sources(): if getattr(p, 'ping_iv', None): if p.is_active() and (p.get_id() not in sickgear.provider_ping_thread_pool or not sickgear.provider_ping_thread_pool[p.get_id()].is_alive()): @@ -8748,7 +8776,7 @@ class ConfigProviders(Config): pass # stop removed providers - prov = [n.get_id() for n in sickgear.providers.sortedProviderList()] + prov = [n.get_id() for n in sickgear.providers.sorted_sources()] for p in [x for x in sickgear.provider_ping_thread_pool if x not in prov]: sickgear.provider_ping_thread_pool[p].stop = True try: @@ -8764,7 +8792,7 @@ class ConfigProviders(Config): provider_list = [] # add all the newznab info we have into our list - newznab_sources = dict(zip([x.get_id() for x in sickgear.newznabProviderList], sickgear.newznabProviderList)) + newznab_sources = dict(zip([x.get_id() for x in sickgear.newznab_providers], sickgear.newznab_providers)) active_ids = [] reload_page = False if newznab_string: @@ -8821,18 +8849,18 @@ class ConfigProviders(Config): new_provider.enabled = True _ = new_provider.caps # when adding a custom, trigger server_type update new_provider.enabled = False - sickgear.newznabProviderList.append(new_provider) + sickgear.newznab_providers.append(new_provider) active_ids.append(cur_id) # delete anything that is missing if sickgear.USE_NZBS: - for source in [x for x in sickgear.newznabProviderList if x.get_id() not in active_ids]: - sickgear.newznabProviderList.remove(source) + for source in [x for x in sickgear.newznab_providers if x.get_id() not in active_ids]: + sickgear.newznab_providers.remove(source) # add all the torrent RSS info we have into our list - torrent_rss_sources = dict(zip([x.get_id() for x in sickgear.torrentRssProviderList], - sickgear.torrentRssProviderList)) + torrent_rss_sources = dict(zip([x.get_id() for x in sickgear.torrent_rss_providers], + sickgear.torrent_rss_providers)) active_ids = [] if torrentrss_string: for curTorrentRssProviderStr in torrentrss_string.split('!!!'): @@ -8868,19 +8896,19 @@ class ConfigProviders(Config): if attr_check in kwargs: setattr(torrss_src, attr, str(kwargs.get(attr_check) or '').strip()) else: - sickgear.torrentRssProviderList.append(new_provider) + sickgear.torrent_rss_providers.append(new_provider) active_ids.append(cur_id) # delete anything that is missing if sickgear.USE_TORRENTS: - for source in [x for x in sickgear.torrentRssProviderList if x.get_id() not in active_ids]: - sickgear.torrentRssProviderList.remove(source) + for source in [x for x in sickgear.torrent_rss_providers if x.get_id() not in active_ids]: + sickgear.torrent_rss_providers.remove(source) # enable/disable states of source providers provider_str_list = provider_order.split() - sources = dict(zip([x.get_id() for x in sickgear.providers.sortedProviderList()], - sickgear.providers.sortedProviderList())) + sources = dict(zip([x.get_id() for x in sickgear.providers.sorted_sources()], + sickgear.providers.sorted_sources())) for cur_src_str in provider_str_list: src_name, src_enabled = cur_src_str.split(':') @@ -8904,7 +8932,7 
@@ class ConfigProviders(Config): torrent_rss_sources[src_name].enabled = src_enabled # update torrent source settings - for torrent_src in [src for src in sickgear.providers.sortedProviderList() + for torrent_src in [src for src in sickgear.providers.sorted_sources() if sickgear.GenericProvider.TORRENT == src.providerType]: # type: TorrentProvider src_id_prefix = torrent_src.get_id() + '_' @@ -8951,7 +8979,7 @@ class ConfigProviders(Config): setattr(torrent_src, attr, str(kwargs.get(src_id_prefix + attr) or default).strip()) # update nzb source settings - for nzb_src in [src for src in sickgear.providers.sortedProviderList() if + for nzb_src in [src for src in sickgear.providers.sorted_sources() if sickgear.GenericProvider.NZB == src.providerType]: src_id_prefix = nzb_src.get_id() + '_' @@ -8979,7 +9007,7 @@ class ConfigProviders(Config): if hasattr(nzb_src, attr): setattr(nzb_src, attr, str(kwargs.get(src_id_prefix + attr) or default).strip()) - sickgear.NEWZNAB_DATA = '!!!'.join([x.config_str() for x in sickgear.newznabProviderList]) + sickgear.NEWZNAB_DATA = '!!!'.join([x.config_str() for x in sickgear.newznab_providers]) sickgear.PROVIDER_ORDER = provider_list helpers.clear_unused_providers() @@ -9487,17 +9515,11 @@ class EventLogs(MainHandler): class WebFileBrowser(MainHandler): def index(self, path='', include_files=False, **kwargs): - """ prevent issues with requests using legacy params """ - include_files = include_files or kwargs.get('includeFiles') or False - """ /legacy """ self.set_header('Content-Type', 'application/json') return json_dumps(folders_at_path(path, True, bool(int(include_files)))) def complete(self, term, include_files=0, **kwargs): - """ prevent issues with requests using legacy params """ - include_files = include_files or kwargs.get('includeFiles') or False - """ /legacy """ self.set_header('Content-Type', 'application/json') return json_dumps([entry['path'] for entry in folders_at_path( @@ -9700,7 +9722,7 @@ class CachedImages(MainHandler): :param tvid_prodid: :param thumb: return thumb or normal as fallback :param pid: optional person_id - :param prefer_person: prefer person image if person_id is set and character has more then 1 person assigned + :param prefer_person: prefer person image if person_id is set and character has more than 1 person assigned """ _ = kwargs.get('oid') # suppress pyc non used var highlight, oid (original id) is a visual ui key show_obj = tvid_prodid and helpers.find_show_by_id(tvid_prodid) diff --git a/sickgear/webserveInit.py b/sickgear/webserveInit.py index 8f1e4ad5..025afaa6 100644 --- a/sickgear/webserveInit.py +++ b/sickgear/webserveInit.py @@ -1,5 +1,5 @@ import os -from sys import exc_info, platform +from sys import exc_info import threading from tornado.ioloop import IOLoop @@ -8,13 +8,9 @@ from tornado.routing import AnyMatches, Rule from tornado.web import Application, _ApplicationRouter from . 
import logger, webapi, webserve -from ._legacy import LegacyConfigPostProcessing, LegacyHomeAddShows, \ - LegacyManageManageSearches, LegacyManageShowProcesses, LegacyErrorLogs from .helpers import create_https_certificates, re_valid_hostname import sickgear -from _23 import PY38 - # noinspection PyUnreachableCode if False: # noinspection PyUnresolvedReferences @@ -218,22 +214,6 @@ class WebServer(threading.Thread): (r'%s/api/builder(/?)(.*)' % self.options['web_root'], webserve.ApiBuilder), (r'%s/api(/?.*)' % self.options['web_root'], webapi.Api), # ---------------------------------------------------------------------------------------------------------- - # legacy deprecated Aug 2019 - (r'%s/home/addShows/?$' % self.options['web_root'], LegacyHomeAddShows), - (r'%s/manage/manageSearches/?$' % self.options['web_root'], LegacyManageManageSearches), - (r'%s/manage/showProcesses/?$' % self.options['web_root'], LegacyManageShowProcesses), - (r'%s/config/postProcessing/?$' % self.options['web_root'], LegacyConfigPostProcessing), - (r'%s/errorlogs/?$' % self.options['web_root'], LegacyErrorLogs), - (r'%s/home/is_alive(/?.*)' % self.options['web_root'], webserve.IsAliveHandler), - (r'%s/home/addShows(/?.*)' % self.options['web_root'], webserve.AddShows), - (r'%s/manage/manageSearches(/?.*)' % self.options['web_root'], webserve.ManageSearch), - (r'%s/manage/showProcesses(/?.*)' % self.options['web_root'], webserve.ShowTasks), - (r'%s/config/postProcessing(/?.*)' % self.options['web_root'], webserve.ConfigMediaProcess), - (r'%s/errorlogs(/?.*)' % self.options['web_root'], webserve.EventLogs), - # ---------------------------------------------------------------------------------------------------------- - # legacy deprecated Aug 2019 - never remove as used in external scripts - (r'%s/home/postprocess(/?.*)' % self.options['web_root'], webserve.HomeProcessMedia), - (r'%s(/?update_watched_state_kodi/?)' % self.options['web_root'], webserve.NoXSRFHandler), # regular catchall routes - keep here at the bottom (r'%s/home(/?.*)' % self.options['web_root'], webserve.Home), (r'%s/manage/(/?.*)' % self.options['web_root'], webserve.Manage), @@ -255,9 +235,6 @@ class WebServer(threading.Thread): # python 3 needs to start event loop first import asyncio - if 'win32' == platform and PY38: - # noinspection PyUnresolvedReferences - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) asyncio.set_event_loop(asyncio.new_event_loop()) from tornado.platform.asyncio import AnyThreadEventLoopPolicy asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) diff --git a/tests/common_tests.py b/tests/common_tests.py index 5c5ad53a..b254283b 100644 --- a/tests/common_tests.py +++ b/tests/common_tests.py @@ -135,7 +135,7 @@ class QualityTests(unittest.TestCase): def check_quality_names(self, quality, cases): for fn in cases: - second = common.Quality.nameQuality(fn) + second = common.Quality.name_quality(fn) self.assertEqual(quality, second, msg='fail [%s] != [%s] for case: %s' % (Quality.qualityStrings[quality], Quality.qualityStrings[second], fn)) @@ -148,7 +148,7 @@ class QualityTests(unittest.TestCase): def check_wantedquality_list(self, cases): for show_quality, result in cases: - sq = common.Quality.combineQualities(*show_quality) + sq = common.Quality.combine_qualities(*show_quality) wd = common.WantedQualities() _ = wd.get_wantedlist(sq, False, common.Quality.NONE, common.UNAIRED, manual=True) for w, v in iteritems(wd): @@ -158,7 +158,7 @@ class QualityTests(unittest.TestCase): def 
check_wantedquality_get_wantedlist(self, cases): for show_quality, result in cases: - sq = common.Quality.combineQualities(*show_quality) + sq = common.Quality.combine_qualities(*show_quality) wd = common.WantedQualities() for case, wlist in result: ka = {'qualities': sq} @@ -169,7 +169,7 @@ class QualityTests(unittest.TestCase): def check_sceneQuality(self, cases): msg = 'Test case: "%s", actual: [%s] != expected: [%s]' for show_name, result in cases: - sq = common.Quality.sceneQuality(show_name[0], show_name[1]) + sq = common.Quality.scene_quality(show_name[0], show_name[1]) self.assertEqual(result, sq, msg=msg % (show_name[0], Quality.qualityStrings[sq], Quality.qualityStrings[result])) @@ -177,8 +177,8 @@ class QualityTests(unittest.TestCase): def test_SDTV(self): - self.assertEqual(common.Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV), - common.Quality.statusFromName('Test.Show.S01E02-GROUP.mkv')) + self.assertEqual(common.Quality.composite_status(common.DOWNLOADED, common.Quality.SDTV), + common.Quality.status_from_name('Test.Show.S01E02-GROUP.mkv')) def test_qualites(self): self.longMessage = True diff --git a/tests/helpers_tests.py b/tests/helpers_tests.py index e78da024..e80e1827 100644 --- a/tests/helpers_tests.py +++ b/tests/helpers_tests.py @@ -65,7 +65,7 @@ class HelpersTests(unittest.TestCase): ((WANTED, Quality.NONE), True), ] for c, b in test_cases: - self.assertEqual(helpers.should_delete_episode(Quality.compositeStatus(*c)), b) + self.assertEqual(helpers.should_delete_episode(Quality.composite_status(*c)), b) def test_encrypt(self): helpers.unique_key1 = '0x12d48f154876c16164a1646' diff --git a/tests/name_parser_tests.py b/tests/name_parser_tests.py index daa49edc..6e052375 100644 --- a/tests/name_parser_tests.py +++ b/tests/name_parser_tests.py @@ -508,8 +508,8 @@ class MultiSceneNumbering(test.SickbeardTestDBCase): ) my_db = db.DBConnection() my_db.mass_action(c_l) - name_cache.addNameToCache(e_t['show_obj']['name'], tvid=e_t['show_obj']['tvid'], - prodid=e_t['show_obj']['prodid']) + name_cache.add_name_to_cache(e_t['show_obj']['name'], tvid=e_t['show_obj']['tvid'], + prodid=e_t['show_obj']['prodid']) for _t in e_t['tests']: try: res = parser.NameParser(True, convert=True).parse(_t['parse_name']) @@ -533,8 +533,8 @@ class EpisodeNameCases(unittest.TestCase): e_obj.season = e_o['season'] e_obj.episode = e_o['number'] s.sxe_ep_obj.setdefault(e_obj.season, {})[e_obj.episode] = e_obj - name_cache.addNameToCache(e_t['show_obj']['name'], tvid=e_t['show_obj']['tvid'], - prodid=e_t['show_obj']['prodid']) + name_cache.add_name_to_cache(e_t['show_obj']['name'], tvid=e_t['show_obj']['tvid'], + prodid=e_t['show_obj']['prodid']) try: res = parser.NameParser(True).parse(e_t['parse_name']) except (BaseException, Exception): @@ -550,7 +550,7 @@ class InvalidCases(unittest.TestCase): for s in [TVShowTest(name=rls_name, prodid=prodid, tvid=tvid, is_anime=is_anime)]: sickgear.showList.append(s) sickgear.showDict[s.sid_int] = s - name_cache.addNameToCache(show_name, tvid=tvid, prodid=prodid) + name_cache.add_name_to_cache(show_name, tvid=tvid, prodid=prodid) invalidexception = False try: _ = parser.NameParser(True).parse(rls_name) @@ -939,7 +939,7 @@ class ExtraInfoNoNameTests(test.SickbeardTestDBCase): sickgear.showList = [tvs] sickgear.showDict = {tvs.sid_int: tvs} name_cache.nameCache = {} - name_cache.buildNameCache() + name_cache.build_name_cache() np = parser.NameParser() r = np.parse(case[2], cache_result=False) diff --git a/tests/pp_tests.py b/tests/pp_tests.py 
index f6fd97a4..58e05bf5 100644 --- a/tests/pp_tests.py +++ b/tests/pp_tests.py @@ -27,7 +27,7 @@ import unittest import sickgear from sickgear.helpers import real_path -from sickgear.name_cache import addNameToCache +from sickgear.name_cache import add_name_to_cache from sickgear.postProcessor import PostProcessor from sickgear.processTV import ProcessTVShow from sickgear.tv import TVEpisode, TVShow, logger @@ -94,7 +94,7 @@ class PPBasicTests(test.SickbeardTestDBCase): ep_obj.release_name = 'test setter' ep_obj.save_to_db() - addNameToCache('show name', tvid=TVINFO_TVDB, prodid=3) + add_name_to_cache('show name', tvid=TVINFO_TVDB, prodid=3) sickgear.PROCESS_METHOD = 'move' pp = PostProcessor(test.FILEPATH) diff --git a/tests/scene_helpers_tests.py b/tests/scene_helpers_tests.py index 2827522b..e49e27aa 100644 --- a/tests/scene_helpers_tests.py +++ b/tests/scene_helpers_tests.py @@ -75,7 +75,7 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase): sickgear.showDict[s.sid_int] = s sickgear.webserve.Home.make_showlist_unique_names() scene_exceptions.retrieve_exceptions() - name_cache.buildNameCache() + name_cache.build_name_cache() def test_sceneExceptionsEmpty(self): self.assertEqual(scene_exceptions.get_scene_exceptions(0, 0), []) @@ -99,7 +99,7 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase): sickgear.showList.append(s) sickgear.showDict[s.sid_int] = s scene_exceptions.retrieve_exceptions() - name_cache.buildNameCache() + name_cache.build_name_cache() self.assertEqual(scene_exceptions.get_scene_exception_by_name(u'ブラック・ラグーン'), [1, 79604, -1]) self.assertEqual(scene_exceptions.get_scene_exception_by_name(u'Burakku Ragūn'), [1, 79604, -1]) self.assertEqual(scene_exceptions.get_scene_exception_by_name('Rokka no Yuusha'), [1, 295243, -1]) @@ -114,11 +114,11 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase): my_db.action('DELETE FROM scene_exceptions WHERE 1=1') # put something in the cache - name_cache.addNameToCache('Cached Name', prodid=0) + name_cache.add_name_to_cache('Cached Name', prodid=0) # updating should not clear the cache this time since our exceptions didn't change scene_exceptions.retrieve_exceptions() - self.assertEqual(name_cache.retrieveNameFromCache('Cached Name'), (0, 0)) + self.assertEqual(name_cache.retrieve_name_from_cache('Cached Name'), (0, 0)) if '__main__' == __name__: diff --git a/tests/show_tests.py b/tests/show_tests.py index 06471da0..82bac9b0 100644 --- a/tests/show_tests.py +++ b/tests/show_tests.py @@ -31,7 +31,7 @@ from sickgear.tv import TVEpisode, TVShow wanted_tests = [ dict( name='Start and End', - show=dict(indexer=1, indexerid=1, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=1, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), dict(season=1, episode=2, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), @@ -64,7 +64,7 @@ wanted_tests = [ dict( name='Start and End, entire season', - show=dict(indexer=1, indexerid=10, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=10, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 2)), dict(season=1, episode=2, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), @@ -97,7 +97,7 @@ wanted_tests = [ dict( name='Start, entire season', - 
show=dict(indexer=1, indexerid=210, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=210, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 2)), dict(season=1, episode=2, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), @@ -130,7 +130,7 @@ wanted_tests = [ dict( name='End only', - show=dict(indexer=1, indexerid=2, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=2, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 3)), dict(season=1, episode=2, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), @@ -163,7 +163,7 @@ wanted_tests = [ dict( name='End only, entire season', - show=dict(indexer=1, indexerid=20, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=20, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 4)), dict(season=1, episode=2, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), @@ -196,7 +196,7 @@ wanted_tests = [ dict( name='End only, multi season', - show=dict(indexer=1, indexerid=3, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=3, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 5)), dict(season=1, episode=2, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), @@ -229,7 +229,7 @@ wanted_tests = [ dict( name='End only, multi season, entire season', - show=dict(indexer=1, indexerid=30, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=30, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 6)), dict(season=1, episode=2, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), @@ -262,7 +262,7 @@ wanted_tests = [ dict( name='End only, multi season, cross season', - show=dict(indexer=1, indexerid=33, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=33, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 7)), dict(season=1, episode=2, status=SKIPPED, quality=Quality.NONE, airdate=datetime.date(2019, 1, 1)), @@ -295,7 +295,7 @@ wanted_tests = [ dict( name='all episodes unaired', - show=dict(indexer=1, indexerid=35, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=35, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ dict(season=1, episode=1, status=UNAIRED, quality=Quality.NONE, airdate=datetime.date.fromordinal(1)), dict(season=1, episode=2, status=UNAIRED, quality=Quality.NONE, airdate=datetime.date.fromordinal(1)), @@ -317,7 +317,7 @@ wanted_tests = [ dict( name='no episodes', - show=dict(indexer=1, indexerid=36, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=36, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ ], start_wanted=7, end_wanted=3, @@ -332,7 +332,7 @@ 
wanted_tests = [ dict( name='no episodes, whole first season', - show=dict(indexer=1, indexerid=37, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=37, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ ], start_wanted=-1, end_wanted=0, @@ -347,7 +347,7 @@ wanted_tests = [ dict( name='no episodes, whole last season', - show=dict(indexer=1, indexerid=38, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=38, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ ], start_wanted=0, end_wanted=-1, @@ -362,7 +362,7 @@ wanted_tests = [ dict( name='no episodes, whole first and last season', - show=dict(indexer=1, indexerid=39, quality=Quality.combineQualities([Quality.SDTV], [])), + show=dict(indexer=1, indexerid=39, quality=Quality.combine_qualities([Quality.SDTV], [])), episodes=[ ], start_wanted=-1, end_wanted=-1, @@ -408,7 +408,7 @@ class ShowAddTests(test.SickbeardTestDBCase): show_obj.sxe_ep_obj[ep['season']] = {} show_obj.sxe_ep_obj[ep['season']][ep['episode']] = TVEpisode(show_obj, ep['season'], ep['episode']) episode = show_obj.sxe_ep_obj[ep['season']][ep['episode']] - episode.status = Quality.compositeStatus(ep['status'], ep['quality']) + episode.status = Quality.composite_status(ep['status'], ep['quality']) episode.airdate = ep['airdate'] episode.name = 'nothing' episode.epid = ep_id diff --git a/tests/snatch_tests.py b/tests/snatch_tests.py index 6e75c8f6..5240becd 100644 --- a/tests/snatch_tests.py +++ b/tests/snatch_tests.py @@ -57,7 +57,7 @@ class SearchTest(test.SickbeardTestDBCase): return True def __init__(self, something): - for provider in sickgear.providers.sortedProviderList(): + for provider in sickgear.providers.sorted_sources(): provider.get_url = self._fake_getURL #provider.isActive = self._fake_isActive diff --git a/tests/test_lib.py b/tests/test_lib.py index 59b255f4..d8be545b 100644 --- a/tests/test_lib.py +++ b/tests/test_lib.py @@ -91,8 +91,8 @@ sickgear.NAMING_SPORTS_PATTERN = '' sickgear.NAMING_MULTI_EP = 1 sickgear.PROVIDER_ORDER = [] -sickgear.newznabProviderList = providers.getNewznabProviderList('') -sickgear.providerList = providers.makeProviderList() +sickgear.newznab_providers = providers.newznab_source_list('') +sickgear.provider_list = providers.provider_modules() sickgear.PROG_DIR = os.path.abspath('..') # sickgear.DATA_DIR = os.path.join(sickgear.PROG_DIR, 'tests') diff --git a/tests/webapi_tests.py b/tests/webapi_tests.py index 7b5d410b..a7b1c9a9 100644 --- a/tests/webapi_tests.py +++ b/tests/webapi_tests.py @@ -75,7 +75,7 @@ test_shows = [ 'quality_init': [], 'quality_upgrade': [], 'episodes': { 1: { - 1: {'name': 'ep1', 'status': Quality.compositeStatus(DOWNLOADED, Quality.HDWEBDL), + 1: {'name': 'ep1', 'status': Quality.composite_status(DOWNLOADED, Quality.HDWEBDL), 'airdate': old_date, 'description': 'ep1 description'}, 2: {'name': 'ep2', 'status': WANTED, 'airdate': last_week, 'description': 'ep2 description'}, 3: {'name': 'ep3', 'status': WANTED, 'airdate': today, 'description': 'ep3 description'}, @@ -174,17 +174,17 @@ class WebAPICase(test.SickbeardTestDBCase): sickgear.events = Events(None) sickgear.show_queue_scheduler = scheduler.Scheduler( show_queue.ShowQueue(), - cycleTime=datetime.timedelta(seconds=3), - threadName='SHOWQUEUE') + cycle_time=datetime.timedelta(seconds=3), + thread_name='SHOWQUEUE') sickgear.search_queue_scheduler = scheduler.Scheduler( search_queue.SearchQueue(), - cycleTime=datetime.timedelta(seconds=3), - 
threadName='SEARCHQUEUE') + cycle_time=datetime.timedelta(seconds=3), + thread_name='SEARCHQUEUE') sickgear.backlog_search_scheduler = search_backlog.BacklogSearchScheduler( search_backlog.BacklogSearcher(), - cycleTime=datetime.timedelta(minutes=60), + cycle_time=datetime.timedelta(minutes=60), run_delay=datetime.timedelta(minutes=60), - threadName='BACKLOG') + thread_name='BACKLOG') sickgear.indexermapper.indexer_list = [i for i in sickgear.indexers.indexer_api.TVInfoAPI().all_sources] for root_dirs, path, expected in root_folder_tests: sickgear.ROOT_DIRS = root_dirs @@ -198,8 +198,8 @@ class WebAPICase(test.SickbeardTestDBCase): elif k in show_obj.__dict__: show_obj.__dict__[k] = v if 'quality_init' in cur_show and cur_show['quality_init']: - show_obj.quality = Quality.combineQualities(cur_show['quality_init'], - cur_show.get('quality_upgrade', [])) + show_obj.quality = Quality.combine_qualities(cur_show['quality_init'], + cur_show.get('quality_upgrade', [])) show_obj.dirty = True show_obj.save_to_db(True) @@ -216,7 +216,7 @@ class WebAPICase(test.SickbeardTestDBCase): ep_obj.__dict__[k] = v show_obj.sxe_ep_obj.setdefault(season, {})[ep] = ep_obj ep_obj.save_to_db(True) - status, quality = Quality.splitCompositeStatus(ep_obj.status) + status, quality = Quality.split_composite_status(ep_obj.status) if status in (DOWNLOADED, SNATCHED): s_r = SearchResult([ep_obj]) s_r.show_obj, s_r.quality, s_r.provider, s_r.name = \ @@ -240,8 +240,8 @@ class WebAPICase(test.SickbeardTestDBCase): for cur_show in test_shows: show_obj = sickgear.helpers.find_show_by_id({cur_show['tvid']: cur_show['prodid']}) if 'quality_init' in cur_show and cur_show['quality_init']: - show_obj.quality = Quality.combineQualities(cur_show['quality_init'], - cur_show.get('quality_upgrade', [])) + show_obj.quality = Quality.combine_qualities(cur_show['quality_init'], + cur_show.get('quality_upgrade', [])) else: show_obj.quality = int(sickgear.QUALITY_DEFAULT) show_obj.upgrade_once = int(cur_show.get('upgrade_once', 0)) @@ -821,7 +821,7 @@ class WebAPICase(test.SickbeardTestDBCase): if cur_quality: params.update({'quality': cur_quality_str}) old_status = ep_obj.status - status, quality = Quality.splitCompositeStatus(ep_obj.status) + status, quality = Quality.split_composite_status(ep_obj.status) expect_fail = UNAIRED == status or (DOWNLOADED == status and not cur_quality) expected_msg = (success_msg, failed_msg)[expect_fail] data = self._request_from_api(webapi.CMD_SickGearEpisodeSetStatus, params=params) From ec874504dee657a957bd1d59bb4f73bf05ed73f5 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Fri, 24 Feb 2023 11:46:07 +0000 Subject: [PATCH 06/21] Change rename db variables/functions. 
--- gui/slick/interfaces/default/config.tmpl | 2 +- lib/sg_helpers.py | 10 +- sickgear.py | 8 +- sickgear/__init__.py | 8 +- sickgear/databases/cache_db.py | 20 +- sickgear/databases/failed_db.py | 30 +- sickgear/databases/mainDB.py | 594 +++++++++++------------ sickgear/db.py | 131 +++-- sickgear/providers/generic.py | 8 +- sickgear/tvcache.py | 2 +- tests/migration_tests.py | 14 +- tests/test_lib.py | 8 +- 12 files changed, 412 insertions(+), 423 deletions(-) diff --git a/gui/slick/interfaces/default/config.tmpl b/gui/slick/interfaces/default/config.tmpl index 26b462e9..23d9b4ce 100644 --- a/gui/slick/interfaces/default/config.tmpl +++ b/gui/slick/interfaces/default/config.tmpl @@ -29,7 +29,7 @@ Config file:$sg_str('CONFIG_FILE') - Database file:$db.dbFilename() + Database file:$db.db_filename() #if $db.db_supports_backup Database backups:$backup_db_path #end if diff --git a/lib/sg_helpers.py b/lib/sg_helpers.py index 18ef9cc6..41f99ef9 100644 --- a/lib/sg_helpers.py +++ b/lib/sg_helpers.py @@ -159,7 +159,7 @@ class ConnectionFailDict(object): if None is not db: with self.lock: my_db = db.DBConnection('cache.db') - if my_db.hasTable('connection_fails'): + if my_db.has_table('connection_fails'): domains = my_db.select('SELECT DISTINCT domain_url from connection_fails') for domain in domains: self.domain_list[domain['domain_url']] = ConnectionFailList(domain['domain_url']) @@ -515,7 +515,7 @@ class ConnectionFailList(object): def _load_fail_values(self): if None is not DATA_DIR: my_db = db.DBConnection('cache.db') - if my_db.hasTable('connection_fails_count'): + if my_db.has_table('connection_fails_count'): r = my_db.select('SELECT * FROM connection_fails_count WHERE domain_url = ?', [self.url]) if r: self._failure_count = try_int(r[0]['failure_count'], 0) @@ -536,7 +536,7 @@ class ConnectionFailList(object): def _save_fail_value(self, field, value): my_db = db.DBConnection('cache.db') - if my_db.hasTable('connection_fails_count'): + if my_db.has_table('connection_fails_count'): r = my_db.action('UPDATE connection_fails_count SET %s = ? WHERE domain_url = ?' 
% field, [value, self.url]) if 0 == r.rowcount: @@ -568,7 +568,7 @@ class ConnectionFailList(object): with self.lock: try: my_db = db.DBConnection('cache.db') - if my_db.hasTable('connection_fails'): + if my_db.has_table('connection_fails'): results = my_db.select('SELECT * FROM connection_fails WHERE domain_url = ?', [self.url]) self._fails = [] for r in results: @@ -586,7 +586,7 @@ class ConnectionFailList(object): with self.lock: try: my_db = db.DBConnection('cache.db') - if my_db.hasTable('connection_fails'): + if my_db.has_table('connection_fails'): # noinspection PyCallByClass,PyTypeChecker time_limit = _totimestamp(datetime.datetime.now() - datetime.timedelta(days=28)) my_db.action('DELETE FROM connection_fails WHERE fail_time < ?', [time_limit]) diff --git a/sickgear.py b/sickgear.py index 9959b19b..d65b14b6 100755 --- a/sickgear.py +++ b/sickgear.py @@ -428,7 +428,7 @@ class SickGear(object): ('sickbeard.db', sickgear.mainDB.MIN_DB_VERSION, sickgear.mainDB.MAX_DB_VERSION, sickgear.mainDB.TEST_BASE_VERSION, 'MainDb') ]: - cur_db_version = db.DBConnection(d).checkDBVersion() + cur_db_version = db.DBConnection(d).check_db_version() # handling of standalone TEST db versions load_msg = 'Downgrading %s to production version' % d @@ -437,7 +437,7 @@ class SickGear(object): print('Your [%s] database version (%s) is a test db version and doesn\'t match SickGear required ' 'version (%s), downgrading to production db' % (d, cur_db_version, max_v)) self.execute_rollback(mo, max_v, load_msg) - cur_db_version = db.DBConnection(d).checkDBVersion() + cur_db_version = db.DBConnection(d).check_db_version() if 100000 <= cur_db_version: print(u'Rollback to production failed.') sys.exit(u'If you have used other forks, your database may be unusable due to their changes') @@ -452,7 +452,7 @@ class SickGear(object): print('Your [%s] database version (%s) is a db version and doesn\'t match SickGear required ' 'version (%s), downgrading to production base db' % (d, cur_db_version, max_v)) self.execute_rollback(mo, base_v, load_msg) - cur_db_version = db.DBConnection(d).checkDBVersion() + cur_db_version = db.DBConnection(d).check_db_version() if 100000 <= cur_db_version: print(u'Rollback to production base failed.') sys.exit(u'If you have used other forks, your database may be unusable due to their changes') @@ -474,7 +474,7 @@ class SickGear(object): u' what this version of SickGear supports. Trying to rollback now. Please wait...' % (d, cur_db_version)) self.execute_rollback(mo, max_v, load_msg) - if db.DBConnection(d).checkDBVersion() > max_v: + if db.DBConnection(d).check_db_version() > max_v: print(u'Rollback failed.') sys.exit(u'If you have used other forks, your database may be unusable due to their changes') print(u'Rollback of [%s] successful.' 
% d) diff --git a/sickgear/__init__.py b/sickgear/__init__.py index 0bf86253..3d7be274 100644 --- a/sickgear/__init__.py +++ b/sickgear/__init__.py @@ -1539,19 +1539,19 @@ def init_stage_2(): # initialize main database my_db = db.DBConnection() - db.MigrationCode(my_db) + db.migration_code(my_db) # initialize the cache database my_db = db.DBConnection('cache.db') - db.upgradeDatabase(my_db, cache_db.InitialSchema) + db.upgrade_database(my_db, cache_db.InitialSchema) # initialize the failed downloads database my_db = db.DBConnection('failed.db') - db.upgradeDatabase(my_db, failed_db.InitialSchema) + db.upgrade_database(my_db, failed_db.InitialSchema) # fix up any db problems my_db = db.DBConnection() - db.sanityCheckDatabase(my_db, mainDB.MainSanityCheck) + db.sanity_check_db(my_db, mainDB.MainSanityCheck) # initialize metadata_providers metadata_provider_dict = metadata.get_metadata_generator_dict() diff --git a/sickgear/databases/cache_db.py b/sickgear/databases/cache_db.py index 87e7ea98..2332af24 100644 --- a/sickgear/databases/cache_db.py +++ b/sickgear/databases/cache_db.py @@ -96,16 +96,16 @@ class InitialSchema(db.SchemaUpgrade): ]) def test(self): - return self.hasTable('lastUpdate') + return self.has_table('lastUpdate') def execute(self): self.do_query(self.queries[next(iter(self.queries))]) - self.setDBVersion(MIN_DB_VERSION, check_db_version=False) + self.set_db_version(MIN_DB_VERSION, check_db_version=False) class ConsolidateProviders(InitialSchema): def test(self): - return 1 < self.checkDBVersion() + return 1 < self.call_check_db_version() def execute(self): keep_tables = {'lastUpdate', 'lastSearch', 'db_version', @@ -113,13 +113,13 @@ class ConsolidateProviders(InitialSchema): # old provider_cache is dropped before re-creation # noinspection SqlResolve self.do_query(['DROP TABLE [provider_cache]'] + self.queries['consolidate_providers'] + - ['DROP TABLE [%s]' % t for t in (set(self.listTables()) - keep_tables)]) + ['DROP TABLE [%s]' % t for t in (set(self.list_tables()) - keep_tables)]) self.finish(True) class AddBacklogParts(ConsolidateProviders): def test(self): - return 2 < self.checkDBVersion() + return 2 < self.call_check_db_version() def execute(self): # noinspection SqlResolve @@ -130,7 +130,7 @@ class AddBacklogParts(ConsolidateProviders): class AddProviderFailureHandling(AddBacklogParts): def test(self): - return 3 < self.checkDBVersion() + return 3 < self.call_check_db_version() def execute(self): self.do_query(self.queries['add_provider_fails']) @@ -139,17 +139,17 @@ class AddProviderFailureHandling(AddBacklogParts): class AddIndexerToTables(AddProviderFailureHandling): def test(self): - return 4 < self.checkDBVersion() + return 4 < self.call_check_db_version() def execute(self): self.do_query(self.queries['add_indexer_to_tables']) - self.addColumn('provider_cache', 'indexer', 'NUMERIC') + self.add_column('provider_cache', 'indexer', 'NUMERIC') self.finish() class AddGenericFailureHandling(AddBacklogParts): def test(self): - return 5 < self.checkDBVersion() + return 5 < self.call_check_db_version() def execute(self): self.do_query(self.queries['connection_fails']) @@ -158,7 +158,7 @@ class AddGenericFailureHandling(AddBacklogParts): class AddSaveQueues(AddGenericFailureHandling): def test(self): - return 6 < self.checkDBVersion() + return 6 < self.call_check_db_version() def execute(self): self.do_query(self.queries['save_queues']) diff --git a/sickgear/databases/failed_db.py b/sickgear/databases/failed_db.py index 03f66c0a..60d760a8 100644 --- 
a/sickgear/databases/failed_db.py +++ b/sickgear/databases/failed_db.py @@ -28,7 +28,7 @@ TEST_BASE_VERSION = None # the base production db version, only needed for TEST # Add new migrations at the bottom of the list; subclass the previous migration. class InitialSchema(db.SchemaUpgrade): def test(self): - return self.hasTable('failed') + return self.has_table('failed') def execute(self): queries = [ @@ -45,18 +45,18 @@ class InitialSchema(db.SchemaUpgrade): class SizeAndProvider(InitialSchema): def test(self): - return self.hasColumn('failed', 'size') and self.hasColumn('failed', 'provider') + return self.has_column('failed', 'size') and self.has_column('failed', 'provider') def execute(self): - self.addColumn('failed', 'size') - self.addColumn('failed', 'provider', 'TEXT', '') + self.add_column('failed', 'size') + self.add_column('failed', 'provider', 'TEXT', '') class History(SizeAndProvider): """Snatch history that can't be modified by the user""" def test(self): - return self.hasTable('history') + return self.has_table('history') def execute(self): self.connection.action('CREATE TABLE history (date NUMERIC, ' + @@ -67,21 +67,21 @@ class HistoryStatus(History): """Store episode status before snatch to revert to if necessary""" def test(self): - return self.hasColumn('history', 'old_status') + return self.has_column('history', 'old_status') def execute(self): - self.addColumn('history', 'old_status', 'NUMERIC', Quality.NONE) - self.addColumn('history', 'showid', 'NUMERIC', '-1') - self.addColumn('history', 'season', 'NUMERIC', '-1') - self.addColumn('history', 'episode', 'NUMERIC', '-1') + self.add_column('history', 'old_status', 'NUMERIC', Quality.NONE) + self.add_column('history', 'showid', 'NUMERIC', '-1') + self.add_column('history', 'season', 'NUMERIC', '-1') + self.add_column('history', 'episode', 'NUMERIC', '-1') class AddIndexerToTables(HistoryStatus): def test(self): - return self.hasColumn('history', 'indexer') + return self.has_column('history', 'indexer') def execute(self): - self.addColumn('history', 'indexer', 'NUMERIC') + self.add_column('history', 'indexer', 'NUMERIC') main_db = db.DBConnection('sickbeard.db') show_ids = {s['prod_id']: s['tv_id'] for s in @@ -91,15 +91,15 @@ class AddIndexerToTables(HistoryStatus): cl.append(['UPDATE history SET indexer = ? 
WHERE showid = ?', [i, s_id]]) self.connection.mass_action(cl) - if self.connection.hasTable('backup_history'): + if self.connection.has_table('backup_history'): self.connection.action( 'REPLACE INTO history ' '(date, size, `release`, provider, old_status, showid, season, episode, indexer)' ' SELECT' ' date, size, `release`, provider, old_status, showid, season, episode, indexer' ' FROM backup_history') - self.connection.removeTable('backup_history') + self.connection.remove_table('backup_history') self.connection.action('VACUUM') - self.setDBVersion(2, check_db_version=False) + self.set_db_version(2, check_db_version=False) diff --git a/sickgear/databases/mainDB.py b/sickgear/databases/mainDB.py index 249751c4..c51e3108 100644 --- a/sickgear/databases/mainDB.py +++ b/sickgear/databases/mainDB.py @@ -103,7 +103,7 @@ class MainSanityCheck(db.DBSanityCheck): # This func would break with multi tv info sources and without tvid, so added check min db version to mitigate # Also, tv_show table had a unique index added at some time to prevent further dupes, # therefore, this func is kept to cleanse legacy data given that it's redundant for new row insertions - if self.connection.checkDBVersion() < 20004: + if self.connection.check_db_version() < 20004: sql_result = self.connection.select( 'SELECT show_id, %(col)s, COUNT(%(col)s) AS count FROM tv_shows GROUP BY %(col)s HAVING count > 1' @@ -136,7 +136,7 @@ class MainSanityCheck(db.DBSanityCheck): # This func would break with multi tv info sources and without tvid, so added check min db version to mitigate # Also, tv_show table had a unique index added at some time to prevent further dupes, # therefore, this func is kept to cleanse legacy data given that it's redundant for new row insertions - if self.connection.checkDBVersion() < 20007: + if self.connection.check_db_version() < 20007: sql_result = self.connection.select( 'SELECT indexer AS tv_id, showid AS prod_id, season, episode, COUNT(showid) as count' @@ -215,18 +215,18 @@ class MainSanityCheck(db.DBSanityCheck): logger.log('Updating TV Episode table with index idx_sta_epi_sta_air') self.connection.action('CREATE INDEX idx_sta_epi_sta_air ON tv_episodes (season, episode, status, airdate)') - if not self.connection.hasIndex('tv_episodes', 'idx_tv_ep_ids'): + if not self.connection.has_index('tv_episodes', 'idx_tv_ep_ids'): logger.log('Updating TV Episode table with index idx_tv_ep_ids') self.connection.action('CREATE INDEX idx_tv_ep_ids ON tv_episodes (indexer, showid)') - if not self.connection.hasIndex('tv_episodes', 'idx_tv_episodes_unique'): + if not self.connection.has_index('tv_episodes', 'idx_tv_episodes_unique'): self.connection.action('CREATE UNIQUE INDEX idx_tv_episodes_unique ON ' 'tv_episodes(indexer,showid,season,episode)') - allowtbl, blocktbl = (('allow', 'block'), ('white', 'black'))[not self.connection.hasTable('blocklist')] + allowtbl, blocktbl = (('allow', 'block'), ('white', 'black'))[not self.connection.has_table('blocklist')] for t in [('%slist' % allowtbl, 'show_id'), ('%slist' % blocktbl, 'show_id'), ('history', 'showid'), ('scene_exceptions', 'indexer_id')]: - if not self.connection.hasIndex('%s' % t[0], 'idx_id_indexer_%s' % t[0]): + if not self.connection.has_index('%s' % t[0], 'idx_id_indexer_%s' % t[0]): # noinspection SqlResolve self.connection.action('CREATE INDEX idx_id_indexer_%s ON %s (indexer, %s)' % (t[0], t[0], t[1])) @@ -309,9 +309,9 @@ class InitialSchema(db.SchemaUpgrade): # Add new migrations at the bottom of the list; subclass the previous migration. 
# 0 -> 20009 def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if not self.hasTable('tv_shows') and not self.hasTable('db_version'): + if not self.has_table('tv_shows') and not self.has_table('db_version'): queries = [ # anime allow and block list 'CREATE TABLE allowlist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)', @@ -383,7 +383,7 @@ class InitialSchema(db.SchemaUpgrade): self.connection.action(query) else: - cur_db_version = self.checkDBVersion() + cur_db_version = self.call_check_db_version() if cur_db_version < MIN_DB_VERSION: logger.log_error_and_exit( @@ -403,7 +403,7 @@ class InitialSchema(db.SchemaUpgrade): ' your database may be unusable due to their modifications.' ) - return self.checkDBVersion() + return self.call_check_db_version() # 9 -> 10 @@ -413,13 +413,13 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): This func is only for 9->10 where older db columns exist, those columns have since changed """ - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if not self.hasColumn('tv_episodes', 'file_size'): - self.addColumn('tv_episodes', 'file_size') + if not self.has_column('tv_episodes', 'file_size'): + self.add_column('tv_episodes', 'file_size') - if not self.hasColumn('tv_episodes', 'release_name'): - self.addColumn('tv_episodes', 'release_name', 'TEXT', '') + if not self.has_column('tv_episodes', 'release_name'): + self.add_column('tv_episodes', 'release_name', 'TEXT', '') sql_result = self.connection.select('SELECT episode_id, location, file_size FROM tv_episodes') @@ -528,14 +528,14 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): self.connection.action('UPDATE tv_episodes SET release_name = ? 
WHERE episode_id = ?', [ep_file_name, cur_result['episode_id']]) - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 10 -> 11 class RenameSeasonFolders(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) # rename the column self.connection.action('ALTER TABLE tv_shows RENAME TO tmp_tv_shows') @@ -558,8 +558,8 @@ class RenameSeasonFolders(db.SchemaUpgrade): # noinspection SqlResolve self.connection.action('DROP TABLE tmp_tv_shows') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 11 -> 12 @@ -628,7 +628,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): return result def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) # update the default quality so we dont grab the wrong qualities after migration sickgear.QUALITY_DEFAULT = self._update_composite_qualities(sickgear.QUALITY_DEFAULT) @@ -697,12 +697,12 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): [self._update_quality(cur_entry['quality']), cur_entry['showid'], cur_entry['date']]]) self.connection.mass_action(cl) - self.incDBVersion() + self.inc_db_version() # cleanup and reduce db if any previous data was removed self.upgrade_log(u'Performing a vacuum on the database.', logger.DEBUG) self.connection.action('VACUUM') - return self.checkDBVersion() + return self.call_check_db_version() # 12 -> 13 @@ -710,20 +710,20 @@ class AddShowidTvdbidIndex(db.SchemaUpgrade): # Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Checking for duplicate shows before adding unique index.') MainSanityCheck(self.connection).fix_duplicate_shows('tvdb_id') self.upgrade_log(u'Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.') - if not self.hasTable('idx_showid'): + if not self.has_table('idx_showid'): self.connection.action('CREATE INDEX idx_showid ON tv_episodes (showid);') - if not self.hasTable('idx_tvdb_id'): + if not self.has_table('idx_tvdb_id'): # noinspection SqlResolve self.connection.action('CREATE UNIQUE INDEX idx_tvdb_id ON tv_shows (tvdb_id);') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 13 -> 14 @@ -731,23 +731,23 @@ class AddLastUpdateTVDB(db.SchemaUpgrade): # Adding column last_update_tvdb to tv_shows for controlling nightly updates def execute(self): - if not self.hasColumn('tv_shows', 'last_update_tvdb'): + if not self.has_column('tv_shows', 'last_update_tvdb'): self.upgrade_log(u'Adding column last_update_tvdb to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'last_update_tvdb', default=1) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'last_update_tvdb', default=1) - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 14 -> 15 class AddDBIncreaseTo15(db.SchemaUpgrade): def execute(self): 
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version to v%s' % self.checkDBVersion()) - self.incDBVersion() - return self.checkDBVersion() + self.upgrade_log(u'Bumping database version to v%s' % self.call_check_db_version()) + self.inc_db_version() + return self.call_check_db_version() # 15 -> 16 @@ -755,121 +755,121 @@ class AddIMDbInfo(db.SchemaUpgrade): def execute(self): db_backed_up = False - if not self.hasTable('imdb_info'): + if not self.has_table('imdb_info'): self.upgrade_log(u'Creating IMDb table imdb_info') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True self.connection.action( 'CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC,' ' akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT,' ' rating TEXT, votes INTEGER, last_update NUMERIC)') - if not self.hasColumn('tv_shows', 'imdb_id'): + if not self.has_column('tv_shows', 'imdb_id'): self.upgrade_log(u'Adding IMDb column imdb_id to tv_shows') if not db_backed_up: - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'imdb_id') + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'imdb_id') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 16 -> 17 class AddProperNamingSupport(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_shows', 'imdb_id')\ - and self.hasColumn('tv_shows', 'rls_require_words')\ - and self.hasColumn('tv_shows', 'rls_ignore_words'): - return self.setDBVersion(5816) + if not self.has_column('tv_shows', 'imdb_id')\ + and self.has_column('tv_shows', 'rls_require_words')\ + and self.has_column('tv_shows', 'rls_ignore_words'): + return self.set_db_version(5816) - if not self.hasColumn('tv_episodes', 'is_proper'): + if not self.has_column('tv_episodes', 'is_proper'): self.upgrade_log(u'Adding column is_proper to tv_episodes') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_episodes', 'is_proper') + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_episodes', 'is_proper') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 17 -> 18 class AddEmailSubscriptionTable(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_episodes', 'is_proper')\ - and self.hasColumn('tv_shows', 'rls_require_words')\ - and self.hasColumn('tv_shows', 'rls_ignore_words')\ - and self.hasColumn('tv_shows', 'skip_notices'): - return self.setDBVersion(5817) + if not self.has_column('tv_episodes', 'is_proper')\ + and self.has_column('tv_shows', 'rls_require_words')\ + and self.has_column('tv_shows', 'rls_ignore_words')\ + and self.has_column('tv_shows', 'skip_notices'): + return self.set_db_version(5817) - if not self.hasColumn('tv_shows', 'notify_list'): + if not self.has_column('tv_shows', 'notify_list'): self.upgrade_log(u'Adding column notify_list to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'notify_list', 'TEXT', None) + 
db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'notify_list', 'TEXT', None) - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 18 -> 19 class AddProperSearch(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_episodes', 'is_proper'): - return self.setDBVersion(12) + if not self.has_column('tv_episodes', 'is_proper'): + return self.set_db_version(12) - if not self.hasColumn('tv_shows', 'notify_list')\ - and self.hasColumn('tv_shows', 'rls_require_words')\ - and self.hasColumn('tv_shows', 'rls_ignore_words')\ - and self.hasColumn('tv_shows', 'skip_notices')\ - and self.hasColumn('history', 'source'): - return self.setDBVersion(5818) + if not self.has_column('tv_shows', 'notify_list')\ + and self.has_column('tv_shows', 'rls_require_words')\ + and self.has_column('tv_shows', 'rls_ignore_words')\ + and self.has_column('tv_shows', 'skip_notices')\ + and self.has_column('history', 'source'): + return self.set_db_version(5818) - if not self.hasColumn('info', 'last_proper_search'): + if not self.has_column('info', 'last_proper_search'): self.upgrade_log(u'Adding column last_proper_search to info') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('info', 'last_proper_search', default=1) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('info', 'last_proper_search', default=1) - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 19 -> 20 class AddDvdOrderOption(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_shows', 'dvdorder'): + if not self.has_column('tv_shows', 'dvdorder'): self.upgrade_log(u'Adding column dvdorder to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'dvdorder', 'NUMERIC', '0') + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'dvdorder', 'NUMERIC', '0') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 20 -> 21 class AddSubtitlesSupport(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_shows', 'subtitles'): + if not self.has_column('tv_shows', 'subtitles'): self.upgrade_log(u'Adding subtitles to tv_shows and tv_episodes') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'subtitles') - self.addColumn('tv_episodes', 'subtitles', 'TEXT', '') - self.addColumn('tv_episodes', 'subtitles_searchcount') - self.addColumn('tv_episodes', 'subtitles_lastsearch', 'TIMESTAMP', str(datetime.datetime.min)) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'subtitles') + self.add_column('tv_episodes', 'subtitles', 'TEXT', '') + self.add_column('tv_episodes', 'subtitles_searchcount') + self.add_column('tv_episodes', 'subtitles_lastsearch', 'TIMESTAMP', str(datetime.datetime.min)) - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 21 -> 22 class ConvertTVShowsToIndexerScheme(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) 
self.upgrade_log(u'Converting TV Shows table to Indexer Scheme...') - if self.hasTable('tmp_tv_shows'): + if self.has_table('tmp_tv_shows'): self.upgrade_log(u'Removing temp tv show tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_tv_shows') @@ -899,18 +899,18 @@ class ConvertTVShowsToIndexerScheme(db.SchemaUpgrade): # noinspection SqlConstantCondition self.connection.action('UPDATE tv_shows SET indexer = 1 WHERE 1=1') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 22 -> 23 class ConvertTVEpisodesToIndexerScheme(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Converting TV Episodes table to Indexer Scheme...') - if self.hasTable('tmp_tv_episodes'): + if self.has_table('tmp_tv_episodes'): self.upgrade_log(u'Removing temp tv episode tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_tv_episodes') @@ -940,18 +940,18 @@ class ConvertTVEpisodesToIndexerScheme(db.SchemaUpgrade): # noinspection SqlConstantCondition self.connection.action('UPDATE tv_episodes SET indexer = 1 WHERE 1=1') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 23 -> 24 class ConvertIMDBInfoToIndexerScheme(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Converting IMDb Info table to Indexer Scheme...') - if self.hasTable('tmp_imdb_info'): + if self.has_table('tmp_imdb_info'): self.upgrade_log(u'Removing temp imdb info tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_imdb_info') @@ -969,18 +969,18 @@ class ConvertIMDBInfoToIndexerScheme(db.SchemaUpgrade): # noinspection SqlResolve self.connection.action('DROP TABLE tmp_imdb_info') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 24 -> 25 class ConvertInfoToIndexerScheme(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Converting Info table to Indexer Scheme...') - if self.hasTable('tmp_info'): + if self.has_table('tmp_info'): self.upgrade_log(u'Removing temp info tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_info') @@ -995,29 +995,29 @@ class ConvertInfoToIndexerScheme(db.SchemaUpgrade): # noinspection SqlResolve self.connection.action('DROP TABLE tmp_info') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 25 -> 26 class AddArchiveFirstMatchOption(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if not self.hasColumn('tv_shows', 'archive_firstmatch'): + if not self.has_column('tv_shows', 'archive_firstmatch'): self.upgrade_log(u'Adding column archive_firstmatch to tv_shows') - self.addColumn('tv_shows', 
'archive_firstmatch', 'NUMERIC', '0') + self.add_column('tv_shows', 'archive_firstmatch', 'NUMERIC', '0') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 26 -> 27 class AddSceneNumbering(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if self.hasTable('scene_numbering'): + if self.has_table('scene_numbering'): self.connection.action('DROP TABLE scene_numbering') self.upgrade_log(u'Upgrading table scene_numbering ...') @@ -1026,14 +1026,14 @@ class AddSceneNumbering(db.SchemaUpgrade): ' scene_season INTEGER, scene_episode INTEGER,' ' PRIMARY KEY (indexer_id,season,episode))') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 27 -> 28 class ConvertIndexerToInteger(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) cl = [] self.upgrade_log(u'Converting Indexer to Integer ...') @@ -1046,50 +1046,50 @@ class ConvertIndexerToInteger(db.SchemaUpgrade): self.connection.mass_action(cl) - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 28 -> 29 class AddRequireAndIgnoreWords(db.SchemaUpgrade): # Adding column rls_require_words and rls_ignore_words to tv_shows def execute(self): - if self.hasColumn('tv_shows', 'rls_require_words') and self.hasColumn('tv_shows', 'rls_ignore_words'): - self.incDBVersion() - return self.checkDBVersion() + if self.has_column('tv_shows', 'rls_require_words') and self.has_column('tv_shows', 'rls_ignore_words'): + self.inc_db_version() + return self.call_check_db_version() db_backed_up = False - if not self.hasColumn('tv_shows', 'rls_require_words'): + if not self.has_column('tv_shows', 'rls_require_words'): self.upgrade_log(u'Adding column rls_require_words to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True - self.addColumn('tv_shows', 'rls_require_words', 'TEXT', '') + self.add_column('tv_shows', 'rls_require_words', 'TEXT', '') - if not self.hasColumn('tv_shows', 'rls_ignore_words'): + if not self.has_column('tv_shows', 'rls_ignore_words'): self.upgrade_log(u'Adding column rls_ignore_words to tv_shows') if not db_backed_up: - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'rls_ignore_words', 'TEXT', '') + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'rls_ignore_words', 'TEXT', '') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 29 -> 30 class AddSportsOption(db.SchemaUpgrade): def execute(self): db_backed_up = False - if not self.hasColumn('tv_shows', 'sports'): + if not self.has_column('tv_shows', 'sports'): self.upgrade_log(u'Adding column sports to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True - self.addColumn('tv_shows', 'sports', 'NUMERIC', '0') + self.add_column('tv_shows', 'sports', 
'NUMERIC', '0') - if self.hasColumn('tv_shows', 'air_by_date') and self.hasColumn('tv_shows', 'sports'): + if self.has_column('tv_shows', 'air_by_date') and self.has_column('tv_shows', 'sports'): # update sports column self.upgrade_log(u'[4/4] Updating tv_shows to reflect the correct sports value...') if not db_backed_up: - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) cl = [] history_quality = self.connection.select( 'SELECT * FROM tv_shows WHERE LOWER(classification) = "sports" AND air_by_date = 1 AND sports = 0') @@ -1099,117 +1099,117 @@ class AddSportsOption(db.SchemaUpgrade): cl.append(['UPDATE tv_shows SET air_by_date = 0 WHERE show_id = ?', [cur_entry['show_id']]]) self.connection.mass_action(cl) - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 30 -> 31 class AddSceneNumberingToTvEpisodes(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Adding columns scene_season and scene_episode to tvepisodes') - self.addColumn('tv_episodes', 'scene_season', 'NUMERIC', 'NULL') - self.addColumn('tv_episodes', 'scene_episode', 'NUMERIC', 'NULL') + self.add_column('tv_episodes', 'scene_season', 'NUMERIC', 'NULL') + self.add_column('tv_episodes', 'scene_episode', 'NUMERIC', 'NULL') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 31 -> 32 class AddAnimeTVShow(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Adding column anime to tv_episodes') - self.addColumn('tv_shows', 'anime', 'NUMERIC', '0') + self.add_column('tv_shows', 'anime', 'NUMERIC', '0') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 32 -> 33 class AddAbsoluteNumbering(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Adding column absolute_number to tv_episodes') - self.addColumn('tv_episodes', 'absolute_number', 'NUMERIC', '0') + self.add_column('tv_episodes', 'absolute_number', 'NUMERIC', '0') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 33 -> 34 class AddSceneAbsoluteNumbering(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Adding columns absolute_number and scene_absolute_number to scene_numbering') - self.addColumn('scene_numbering', 'absolute_number', 'NUMERIC', '0') - self.addColumn('scene_numbering', 'scene_absolute_number', 'NUMERIC', '0') + self.add_column('scene_numbering', 'absolute_number', 'NUMERIC', '0') + self.add_column('scene_numbering', 'scene_absolute_number', 'NUMERIC', '0') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 34 -> 35 class AddAnimeAllowlistBlocklist(db.SchemaUpgrade): def 
execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) cl = [['CREATE TABLE allowlist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)'], ['CREATE TABLE blocklist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)']] self.upgrade_log(u'Creating tables for anime allow and block lists') self.connection.mass_action(cl) - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 35 -> 36 class AddSceneAbsoluteNumbering2(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Adding column scene_absolute_number to tv_episodes') - self.addColumn('tv_episodes', 'scene_absolute_number', 'NUMERIC', '0') + self.add_column('tv_episodes', 'scene_absolute_number', 'NUMERIC', '0') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 36 -> 37 class AddXemRefresh(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Creating table xem_refresh') self.connection.action( 'CREATE TABLE xem_refresh (indexer TEXT, indexer_id INTEGER PRIMARY KEY, last_refreshed INTEGER)') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 37 -> 38 class AddSceneToTvShows(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Adding column scene to tv_shows') - self.addColumn('tv_shows', 'scene', 'NUMERIC', '0') + self.add_column('tv_shows', 'scene', 'NUMERIC', '0') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 38 -> 39 class AddIndexerMapping(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if self.hasTable('indexer_mapping'): + if self.has_table('indexer_mapping'): self.connection.action('DROP TABLE indexer_mapping') self.upgrade_log(u'Adding table indexer_mapping') @@ -1217,44 +1217,44 @@ class AddIndexerMapping(db.SchemaUpgrade): 'CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER, mindexer NUMERIC,' ' PRIMARY KEY (indexer_id, indexer))') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 39 -> 40 class AddVersionToTvEpisodes(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Adding columns release_group and version to tv_episodes') - self.addColumn('tv_episodes', 'release_group', 'TEXT', '') - self.addColumn('tv_episodes', 'version', 'NUMERIC', '-1') + self.add_column('tv_episodes', 'release_group', 'TEXT', '') + self.add_column('tv_episodes', 'version', 'NUMERIC', '-1') self.upgrade_log(u'Adding column version to history') - 
self.addColumn('history', 'version', 'NUMERIC', '-1') + self.add_column('history', 'version', 'NUMERIC', '-1') - self.incDBVersion() - return self.checkDBVersion() + self.inc_db_version() + return self.call_check_db_version() # 40 -> 10000 class BumpDatabaseVersion(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Bumping database version') - return self.setDBVersion(10000) + return self.set_db_version(10000) # 41,42 -> 10001 class Migrate41(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Bumping database version') - return self.setDBVersion(10001) + return self.set_db_version(10001) # 43,44 -> 10001 @@ -1264,25 +1264,25 @@ class Migrate43(db.SchemaUpgrade): db_backed_up = False db_chg = None table = 'tmdb_info' - if self.hasTable(table): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + if self.has_table(table): + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True self.upgrade_log(u'Dropping redundant table tmdb_info') # noinspection SqlResolve self.connection.action('DROP TABLE [%s]' % table) db_chg = True - if self.hasColumn('tv_shows', 'tmdb_id'): + if self.has_column('tv_shows', 'tmdb_id'): if not db_backed_up: - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True self.upgrade_log(u'Dropping redundant tmdb_info refs') - self.dropColumn('tv_shows', 'tmdb_id') + self.drop_columns('tv_shows', 'tmdb_id') db_chg = True - if not self.hasTable('db_version'): + if not self.has_table('db_version'): if not db_backed_up: - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action('PRAGMA user_version = 0') self.connection.action('CREATE TABLE db_version (db_version INTEGER);') self.connection.action('INSERT INTO db_version (db_version) VALUES (0);') @@ -1290,124 +1290,124 @@ class Migrate43(db.SchemaUpgrade): if not db_chg: self.upgrade_log(u'Bumping database version') - return self.setDBVersion(10001) + return self.set_db_version(10001) # 4301 -> 10002 class Migrate4301(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Bumping database version') - return self.setDBVersion(10002) + return self.set_db_version(10002) # 4302,4400 -> 10003 class Migrate4302(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Bumping database version') - return self.setDBVersion(10003) + return self.set_db_version(10003) # 5816 - 5818 -> 15 class MigrateUpstream(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Migrate SickBeard db v%s into v15' % 
str(self.checkDBVersion()).replace('58', '')) + self.upgrade_log(u'Migrate SickBeard db v%s into v15' % str(self.call_check_db_version()).replace('58', '')) - return self.setDBVersion(15) + return self.set_db_version(15) # 10000 -> 20000 class SickGearDatabaseVersion(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Bumping database version to new SickGear standards') - return self.setDBVersion(20000) + return self.set_db_version(20000) # 10001 -> 10000 class RemoveDefaultEpStatusFromTvShows(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Dropping redundant column default_ep_status from tv_shows') - self.dropColumn('tv_shows', 'default_ep_status') + self.drop_columns('tv_shows', 'default_ep_status') - return self.setDBVersion(10000) + return self.set_db_version(10000) # 10002 -> 10001 class RemoveMinorDBVersion(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Dropping redundant column db_minor_version from db_version') - self.dropColumn('db_version', 'db_minor_version') + self.drop_columns('db_version', 'db_minor_version') - return self.setDBVersion(10001) + return self.set_db_version(10001) # 10003 -> 10002 class RemoveMetadataSub(db.SchemaUpgrade): def execute(self): - if self.hasColumn('tv_shows', 'sub_use_sr_metadata'): + if self.has_column('tv_shows', 'sub_use_sr_metadata'): self.upgrade_log(u'Dropping redundant column metadata sub') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.dropColumn('tv_shows', 'sub_use_sr_metadata') + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.drop_columns('tv_shows', 'sub_use_sr_metadata') - return self.setDBVersion(10002) + return self.set_db_version(10002) # 20000 -> 20001 class DBIncreaseTo20001(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log(u'Bumping database version to force a backup before new database code') self.connection.action('VACUUM') self.upgrade_log(u'Performed a vacuum on the database', logger.DEBUG) - return self.setDBVersion(20001) + return self.set_db_version(20001) # 20001 -> 20002 class AddTvShowOverview(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_shows', 'overview'): + if not self.has_column('tv_shows', 'overview'): self.upgrade_log(u'Adding column overview to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'overview', 'TEXT', '') + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'overview', 'TEXT', '') - return self.setDBVersion(20002) + return self.set_db_version(20002) # 20002 -> 20003 class AddTvShowTags(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_shows', 'tag'): + if not self.has_column('tv_shows', 'tag'): self.upgrade_log(u'Adding tag to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', 
self.checkDBVersion()) - self.addColumn('tv_shows', 'tag', 'TEXT', 'Show List') + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'tag', 'TEXT', 'Show List') - return self.setDBVersion(20003) + return self.set_db_version(20003) # 20003 -> 20004 class ChangeMapIndexer(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if self.hasTable('indexer_mapping'): + if self.has_table('indexer_mapping'): self.connection.action('DROP TABLE indexer_mapping') self.upgrade_log(u'Changing table indexer_mapping') @@ -1418,22 +1418,22 @@ class ChangeMapIndexer(db.SchemaUpgrade): self.connection.action('CREATE INDEX IF NOT EXISTS idx_mapping ON indexer_mapping (indexer_id, indexer)') - if not self.hasColumn('info', 'last_run_backlog'): + if not self.has_column('info', 'last_run_backlog'): self.upgrade_log('Adding last_run_backlog to info') - self.addColumn('info', 'last_run_backlog', 'NUMERIC', 1) + self.add_column('info', 'last_run_backlog', 'NUMERIC', 1) self.upgrade_log(u'Moving table scene_exceptions from cache.db to sickbeard.db') - if self.hasTable('scene_exceptions_refresh'): + if self.has_table('scene_exceptions_refresh'): self.connection.action('DROP TABLE scene_exceptions_refresh') self.connection.action('CREATE TABLE scene_exceptions_refresh (list TEXT PRIMARY KEY, last_refreshed INTEGER)') - if self.hasTable('scene_exceptions'): + if self.has_table('scene_exceptions'): self.connection.action('DROP TABLE scene_exceptions') self.connection.action('CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY,' ' indexer_id INTEGER KEY, show_name TEXT, season NUMERIC, custom NUMERIC)') try: cachedb = db.DBConnection(filename='cache.db') - if cachedb.hasTable('scene_exceptions'): + if cachedb.has_table('scene_exceptions'): sql_result = cachedb.action('SELECT * FROM scene_exceptions') cs = [] for cur_result in sql_result: @@ -1452,7 +1452,7 @@ class ChangeMapIndexer(db.SchemaUpgrade): 'scene_exceptions', 'scene_exceptions_refresh', 'info', 'indexer_mapping', 'db_version', 'history', 'imdb_info', 'lastUpdate', 'scene_numbering', 'tv_episodes', 'tv_shows', 'xem_refresh'} - current_tables = set(self.listTables()) + current_tables = set(self.list_tables()) remove_tables = list(current_tables - keep_tables) for table in remove_tables: # noinspection SqlResolve @@ -1460,34 +1460,34 @@ class ChangeMapIndexer(db.SchemaUpgrade): self.connection.action('VACUUM') - return self.setDBVersion(20004) + return self.set_db_version(20004) # 20004 -> 20005 class AddShowNotFoundCounter(db.SchemaUpgrade): def execute(self): - if not self.hasTable('tv_shows_not_found'): + if not self.has_table('tv_shows_not_found'): self.upgrade_log(u'Adding table tv_shows_not_found') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action( 'CREATE TABLE tv_shows_not_found (indexer NUMERIC NOT NULL, indexer_id NUMERIC NOT NULL,' ' fail_count NUMERIC NOT NULL DEFAULT 0, last_check NUMERIC NOT NULL, last_success NUMERIC,' ' PRIMARY KEY (indexer_id, indexer))') - return self.setDBVersion(20005) + return self.set_db_version(20005) # 20005 -> 20006 class AddFlagTable(db.SchemaUpgrade): def execute(self): - if not self.hasTable('flags'): + if not self.has_table('flags'): self.upgrade_log(u'Adding table 
flags') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action('CREATE TABLE flags (flag PRIMARY KEY NOT NULL )') - return self.setDBVersion(20006) + return self.set_db_version(20006) # 20006 -> 20007 @@ -1496,61 +1496,61 @@ class DBIncreaseTo20007(db.SchemaUpgrade): self.upgrade_log(u'Bumping database version') - return self.setDBVersion(20007) + return self.set_db_version(20007) # 20007 -> 20008 class AddWebdlTypesTable(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action('CREATE TABLE webdl_types (dname TEXT NOT NULL , regex TEXT NOT NULL )') - return self.setDBVersion(20008) + return self.set_db_version(20008) # 20008 -> 20009 class AddWatched(db.SchemaUpgrade): def execute(self): # remove old table from version 20007 - if self.hasTable('tv_episodes_watched') and not self.hasColumn('tv_episodes_watched', 'clientep_id'): + if self.has_table('tv_episodes_watched') and not self.has_column('tv_episodes_watched', 'clientep_id'): self.connection.action('DROP TABLE tv_episodes_watched') self.connection.action('VACUUM') - if not self.hasTable('tv_episodes_watched'): + if not self.has_table('tv_episodes_watched'): self.upgrade_log(u'Adding table tv_episodes_watched') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action( 'CREATE TABLE tv_episodes_watched (tvep_id NUMERIC NOT NULL, clientep_id TEXT, label TEXT,' ' played NUMERIC DEFAULT 0 NOT NULL, date_watched NUMERIC NOT NULL, date_added NUMERIC,' ' status NUMERIC, location TEXT, file_size NUMERIC, hide INT default 0 not null)' ) - return self.setDBVersion(20009) + return self.set_db_version(20009) # 20009 -> 20010 class AddPrune(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_shows', 'prune'): + if not self.has_column('tv_shows', 'prune'): self.upgrade_log('Adding prune to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'prune', 'INT', 0) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'prune', 'INT', 0) - return self.setDBVersion(20010) + return self.set_db_version(20010) # 20010 -> 20011 class AddIndexerToTables(db.SchemaUpgrade): def execute(self): sickgear.helpers.upgrade_new_naming() - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) show_ids = {s['prod_id']: s['tv_id'] for s in self.connection.select('SELECT indexer AS tv_id, indexer_id AS prod_id FROM tv_shows')} - allowtbl, blocktbl = (('allow', 'block'), ('white', 'black'))[not self.connection.hasTable('blocklist')] + allowtbl, blocktbl = (('allow', 'block'), ('white', 'black'))[not self.connection.has_table('blocklist')] allowtbl, blocktbl = '%slist' % allowtbl, '%slist' % blocktbl columns = {allowtbl: 'show_id, range, keyword, indexer', blocktbl: 'show_id, range, keyword, indexer', @@ -1560,9 +1560,9 @@ class AddIndexerToTables(db.SchemaUpgrade): # add missing indexer column for t in [(allowtbl, 'show_id'), (blocktbl, 'show_id'), ('history', 'showid'), ('scene_exceptions', 'indexer_id')]: - if not 
self.hasColumn(t[0], 'indexer'): + if not self.has_column(t[0], 'indexer'): self.upgrade_log(u'Adding TV info support to %s table' % t[0]) - self.addColumn(t[0], 'indexer') + self.add_column(t[0], 'indexer') cl = [] for s_id, i in iteritems(show_ids): # noinspection SqlResolve @@ -1578,11 +1578,11 @@ class AddIndexerToTables(db.SchemaUpgrade): if 0 < self.connection.connection.total_changes: self.upgrade_log('Removed orphaned data from %s' % t[0]) - if self.connection.hasTable('backup_%s' % t[0]): + if self.connection.has_table('backup_%s' % t[0]): self.upgrade_log('Adding backup data to %s' % t[0]) self.connection.action('REPLACE INTO %s SELECT %s FROM %s' % ('%s (%s)' % (t[0], columns[t[0]]), columns[t[0]], 'backup_%s' % t[0])) - self.connection.removeTable('backup_%s' % t[0]) + self.connection.remove_table('backup_%s' % t[0]) # recreate tables that have wrong primary key = indexer_id without indexer self.upgrade_log('Adding TV info support to scene_numbering') @@ -1626,7 +1626,7 @@ class AddIndexerToTables(db.SchemaUpgrade): self.connection.mass_action(cl) self.connection.action('CREATE INDEX idx_id_indexer_imdb_info ON imdb_info (indexer,indexer_id)') - if self.connection.hasTable('backup_imdb_info'): + if self.connection.has_table('backup_imdb_info'): self.upgrade_log('Adding backup data to imdb_info') # noinspection SqlResolve self.connection.action('REPLACE INTO imdb_info (indexer, indexer_id, imdb_id, title, year, akas, ' @@ -1634,29 +1634,29 @@ class AddIndexerToTables(db.SchemaUpgrade): 'last_update) SELECT indexer, indexer_id, imdb_id, title, year, akas, runtimes, ' 'genres, countries, country_codes, certificates, rating, votes, last_update ' 'FROM backup_imdb_info') - self.connection.removeTable('backup_imdb_info') + self.connection.remove_table('backup_imdb_info') # remove an index of an no longer existing column self.upgrade_log('Changing/Re-Creating Indexes') - if self.connection.hasIndex('tv_shows', 'idx_tvdb_id'): - self.connection.removeIndex('tv_shows', 'idx_tvdb_id') + if self.connection.has_index('tv_shows', 'idx_tvdb_id'): + self.connection.remove_index('tv_shows', 'idx_tvdb_id') - if self.connection.hasIndex('tv_shows', 'idx_indexer_id'): - self.connection.removeIndex('tv_shows', 'idx_indexer_id') + if self.connection.has_index('tv_shows', 'idx_indexer_id'): + self.connection.remove_index('tv_shows', 'idx_indexer_id') self.connection.action('CREATE UNIQUE INDEX idx_indexer_id ON tv_shows (indexer,indexer_id)') - if self.connection.hasIndex('tv_episodes', 'idx_showid'): - self.connection.removeIndex('tv_episodes', 'idx_showid') + if self.connection.has_index('tv_episodes', 'idx_showid'): + self.connection.remove_index('tv_episodes', 'idx_showid') - if self.connection.hasIndex('tv_episodes', 'idx_tv_episodes_showid_airdate'): - self.connection.removeIndex('tv_episodes', 'idx_tv_episodes_showid_airdate') + if self.connection.has_index('tv_episodes', 'idx_tv_episodes_showid_airdate'): + self.connection.remove_index('tv_episodes', 'idx_tv_episodes_showid_airdate') self.connection.action('CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(indexer,showid,airdate)') - if not self.connection.hasIndex('tv_episodes', 'idx_tv_episodes_unique'): + if not self.connection.has_index('tv_episodes', 'idx_tv_episodes_unique'): self.connection.action('CREATE UNIQUE INDEX idx_tv_episodes_unique ON ' 'tv_episodes(indexer,showid,season,episode)') - if self.connection.hasTable('backup_tv_episodes'): + if self.connection.has_table('backup_tv_episodes'): self.upgrade_log('Adding 
backup data to tv_episodes') # noinspection SqlResolve self.connection.action('REPLACE INTO tv_episodes (episode_id, showid, indexerid, indexer, name, season, ' @@ -1668,9 +1668,9 @@ class AddIndexerToTables(db.SchemaUpgrade): 'file_size, release_name, subtitles, subtitles_searchcount, subtitles_lastsearch, ' 'is_proper, scene_season, scene_episode, absolute_number, scene_absolute_number, ' 'release_group, version FROM backup_tv_episodes') - self.connection.removeTable('backup_tv_episodes') + self.connection.remove_table('backup_tv_episodes') - if self.connection.hasTable('backup_tv_shows'): + if self.connection.has_table('backup_tv_shows'): self.upgrade_log('Adding backup data to tv_shows') # noinspection SqlResolve self.connection.action('REPLACE INTO tv_shows (show_id, indexer_id, indexer, show_name, location, ' @@ -1684,25 +1684,25 @@ class AddIndexerToTables(db.SchemaUpgrade): 'notify_list, imdb_id, last_update_indexer, dvdorder, archive_firstmatch, ' 'rls_require_words, rls_ignore_words, sports, anime, scene, overview, tag, prune ' 'FROM backup_tv_shows') - self.connection.removeTable('backup_tv_shows') + self.connection.remove_table('backup_tv_shows') self.connection.action('VACUUM') - return self.setDBVersion(20011) + return self.set_db_version(20011) # 20011 -> 20012 class AddShowExludeGlobals(db.SchemaUpgrade): def execute(self): - if not self.hasColumn('tv_shows', 'rls_global_exclude_ignore'): + if not self.has_column('tv_shows', 'rls_global_exclude_ignore'): self.upgrade_log('Adding rls_global_exclude_ignore, rls_global_exclude_require to tv_shows') - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) - self.addColumn('tv_shows', 'rls_global_exclude_ignore', data_type='TEXT', default='') - self.addColumn('tv_shows', 'rls_global_exclude_require', data_type='TEXT', default='') + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + self.add_column('tv_shows', 'rls_global_exclude_ignore', data_type='TEXT', default='') + self.add_column('tv_shows', 'rls_global_exclude_require', data_type='TEXT', default='') - if self.hasTable('tv_shows_exclude_backup'): + if self.has_table('tv_shows_exclude_backup'): self.upgrade_log('Adding rls_global_exclude_ignore, rls_global_exclude_require from backup to tv_shows') # noinspection SqlResolve self.connection.mass_action([['UPDATE tv_shows SET rls_global_exclude_ignore = ' @@ -1717,15 +1717,15 @@ class AddShowExludeGlobals(db.SchemaUpgrade): ['DROP TABLE tv_shows_exclude_backup'] ]) - return self.setDBVersion(20012) + return self.set_db_version(20012) # 20012 -> 20013 class RenameAllowBlockListTables(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if not self.connection.hasTable('blocklist'): + if not self.connection.has_table('blocklist'): self.upgrade_log('Renaming allow/block list tables') for old, new in (('black', 'block'), ('white', 'allow')): @@ -1738,19 +1738,19 @@ class RenameAllowBlockListTables(db.SchemaUpgrade): ['DROP TABLE tmp_%slist' % new] ]) - return self.setDBVersion(20013) + return self.set_db_version(20013) # 20013 -> 20014 class AddHistoryHideColumn(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if not self.hasColumn('history', 'hide'): + if not 
self.has_column('history', 'hide'): self.upgrade_log('Adding hide column to history') - self.addColumn('history', 'hide', default=0, set_default=True) + self.add_column('history', 'hide', default=0, set_default=True) - if self.hasTable('history_hide_backup'): + if self.has_table('history_hide_backup'): self.upgrade_log('Restoring hide status in history from backup') # noinspection SqlResolve self.connection.mass_action([ @@ -1765,30 +1765,30 @@ class AddHistoryHideColumn(db.SchemaUpgrade): ['DROP TABLE history_hide_backup'] ]) - return self.setDBVersion(20014) + return self.set_db_version(20014) # 20014 -> 20015 class ChangeShowData(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection, 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.upgrade_log('Adding new data columns to tv_shows') - self.addColumns('tv_shows', [('timezone', 'TEXT', ''), ('airtime', 'NUMERIC'), - ('network_country', 'TEXT', ''), ('network_country_code', 'TEXT', ''), - ('network_id', 'NUMERIC'), ('network_is_stream', 'INTEGER'), - ('src_update_timestamp', 'INTEGER')]) + self.add_columns('tv_shows', [('timezone', 'TEXT', ''), ('airtime', 'NUMERIC'), + ('network_country', 'TEXT', ''), ('network_country_code', 'TEXT', ''), + ('network_id', 'NUMERIC'), ('network_is_stream', 'INTEGER'), + ('src_update_timestamp', 'INTEGER')]) self.upgrade_log('Adding new data columns to tv_episodes') - self.addColumns('tv_episodes', [('timezone', 'TEXT', ''), ('airtime', 'NUMERIC'), - ('runtime', 'NUMERIC', 0), ('timestamp', 'NUMERIC'), - ('network', 'TEXT', ''), ('network_country', 'TEXT', ''), - ('network_country_code', 'TEXT', ''), ('network_id', 'NUMERIC'), - ('network_is_stream', 'INTEGER')]) + self.add_columns('tv_episodes', [('timezone', 'TEXT', ''), ('airtime', 'NUMERIC'), + ('runtime', 'NUMERIC', 0), ('timestamp', 'NUMERIC'), + ('network', 'TEXT', ''), ('network_country', 'TEXT', ''), + ('network_country_code', 'TEXT', ''), ('network_id', 'NUMERIC'), + ('network_is_stream', 'INTEGER')]) - if not self.hasColumn('imdb_info', 'is_mini_series'): + if not self.has_column('imdb_info', 'is_mini_series'): self.upgrade_log('Adding new data columns to imdb_info') - self.addColumns('imdb_info', [('is_mini_series', 'INTEGER', 0), ('episode_count', 'NUMERIC')]) + self.add_columns('imdb_info', [('is_mini_series', 'INTEGER', 0), ('episode_count', 'NUMERIC')]) self.upgrade_log('Adding Character and Persons tables') @@ -1984,7 +1984,7 @@ class ChangeShowData(db.SchemaUpgrade): self.connection.mass_action(cl) self.connection.action('VACUUM') - return self.setDBVersion(20015) + return self.set_db_version(20015) # 20015 -> 20016 @@ -2014,8 +2014,8 @@ class ChangeTmdbID(db.SchemaUpgrade): except (BaseException, Exception): pass - db.backup_database(self.connection, 'sickbeard.db', self.checkDBVersion()) - has_tmdb_backups = all(self.hasTable(_r) for _r in + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) + has_tmdb_backups = all(self.has_table(_r) for _r in ('backup_tmdb_tv_shows', 'backup_tmdb_tv_episodes', 'backup_tmdb_indexer_mapping')) if has_tmdb_backups: self.upgrade_log('Checking for dupe shows in backup tables') @@ -2091,8 +2091,8 @@ class ChangeTmdbID(db.SchemaUpgrade): ['REPLACE INTO indexer_mapping (indexer_id, indexer, mindexer_id, mindexer, date, status)' ' SELECT indexer_id, indexer, mindexer_id, mindexer, date, status FROM backup_tmdb_indexer_mapping'], ])[has_tmdb_backups]) - 
[self.connection.removeTable(_t) for _t in ('backup_tmdb_tv_shows', 'backup_tmdb_tv_episodes', + [self.connection.remove_table(_t) for _t in ('backup_tmdb_tv_shows', 'backup_tmdb_tv_episodes', 'backup_tmdb_indexer_mapping')] - return self.setDBVersion(20016) + return self.set_db_version(20016) diff --git a/sickgear/db.py b/sickgear/db.py index bce8ed81..2e70ba16 100644 --- a/sickgear/db.py +++ b/sickgear/db.py @@ -37,6 +37,7 @@ from six import iterkeys, iteritems, itervalues # noinspection PyUnreachableCode if False: + # noinspection PyUnresolvedReferences from typing import Any, AnyStr, Dict, List, Optional, Tuple, Union @@ -47,7 +48,7 @@ db_support_upsert = (3, 25, 0) <= sqlite3.sqlite_version_info # type: bool db_supports_backup = hasattr(sqlite3.Connection, 'backup') and (3, 6, 11) <= sqlite3.sqlite_version_info # type: bool -def dbFilename(filename='sickbeard.db', suffix=None): +def db_filename(filename='sickbeard.db', suffix=None): # type: (AnyStr, Optional[AnyStr]) -> AnyStr """ @param filename: The sqlite database filename to use. If not specified, @@ -70,7 +71,7 @@ def mass_upsert_sql(table_name, value_dict, key_dict, sanitise=True): :param value_dict: dict of values to be set {'table_fieldname': value} :param key_dict: dict of restrains for update {'table_fieldname': value} :param sanitise: True to remove k, v pairs in keyDict from valueDict as they must not exist in both. - This option has a performance hit so it's best to remove key_dict keys from value_dict and set this False instead. + This option has a performance hit, so it's best to remove key_dict keys from value_dict and set this False instead. :type sanitise: Boolean :return: list of 2 sql command """ @@ -104,9 +105,9 @@ class DBConnection(object): from . import helpers self.new_db = False - db_src = dbFilename(filename) + db_src = db_filename(filename) if not os.path.isfile(db_src): - db_alt = dbFilename('sickrage.db') + db_alt = db_filename('sickrage.db') if os.path.isfile(db_alt): helpers.copy_file(db_alt, db_src) @@ -143,6 +144,7 @@ class DBConnection(object): logger.log('Backup target file already exists', logger.ERROR) return False, 'Backup target file already exists' + # noinspection PyUnusedLocal def progress(status, remaining, total): logger.log('Copied %s of %s pages...' % (total - remaining, total), logger.DEBUG) @@ -167,11 +169,11 @@ class DBConnection(object): return True, 'Backup successful' - def checkDBVersion(self): + def check_db_version(self): # type: (...) -> int try: - if self.hasTable('db_version'): + if self.has_table('db_version'): result = self.select('SELECT db_version FROM db_version') else: version = self.select('PRAGMA user_version')[0]['user_version'] @@ -185,7 +187,7 @@ class DBConnection(object): if result: version = int(result[0]['db_version']) - if 10000 > version and self.hasColumn('db_version', 'db_minor_version'): + if 10000 > version and self.has_column('db_version', 'db_minor_version'): # noinspection SqlResolve minor = self.select('SELECT db_minor_version FROM db_version') return version * 100 + int(minor[0]['db_minor_version']) @@ -313,7 +315,7 @@ class DBConnection(object): + ' VALUES (%s)' % ', '.join(['?'] * (len(value_dict) + len(key_dict))) self.action(query, list(value_dict.values()) + list(key_dict.values())) - def tableInfo(self, table_name): + def table_info(self, table_name): # type: (AnyStr) -> Dict[AnyStr, Dict[AnyStr, AnyStr]] # FIXME ? 
binding is not supported here, but I cannot find a way to escape a string manually @@ -331,38 +333,32 @@ class DBConnection(object): d[col[0]] = row[idx] return d - def hasTable(self, table_name): + def has_table(self, table_name): # type: (AnyStr) -> bool return 0 < len(self.select('SELECT 1 FROM sqlite_master WHERE name = ?;', (table_name,))) - def hasColumn(self, table_name, column): + def has_column(self, table_name, column): # type: (AnyStr, AnyStr) -> bool - return column in self.tableInfo(table_name) + return column in self.table_info(table_name) - def hasIndex(self, table_name, index): + def has_index(self, table_name, index): # type: (AnyStr, AnyStr) -> bool - sqlResults = self.select('PRAGMA index_list([%s])' % table_name) - for result in sqlResults: + sql_results = self.select('PRAGMA index_list([%s])' % table_name) + for result in sql_results: if result['name'] == index: return True return False - def removeIndex(self, table, name): + def remove_index(self, table, name): # type: (AnyStr, AnyStr) -> None - if self.hasIndex(table, name): + if self.has_index(table, name): self.action('DROP INDEX' + ' [%s]' % name) - def removeTable(self, name): + def remove_table(self, name): # type: (AnyStr) -> None - if self.hasTable(name): + if self.has_table(name): self.action('DROP TABLE' + ' [%s]' % name) - # noinspection SqlResolve - def addColumn(self, table, column, data_type='NUMERIC', default=0): - # type: (AnyStr, AnyStr, AnyStr, Any) -> None - self.action('ALTER TABLE [%s] ADD %s %s' % (table, column, data_type)) - self.action('UPDATE [%s] SET %s = ?' % (table, column), (default,)) - def has_flag(self, flag_name): # type: (AnyStr) -> bool sql_result = self.select('SELECT flag FROM flags WHERE flag = ?', [flag_name]) @@ -415,7 +411,7 @@ class DBConnection(object): logger.load_log('Upgrading %s' % self.filename, to_log, log_level) -def sanityCheckDatabase(connection, sanity_check): +def sanity_check_db(connection, sanity_check): sanity_check(connection).check() @@ -427,36 +423,36 @@ class DBSanityCheck(object): pass -def upgradeDatabase(connection, schema): +def upgrade_database(connection, schema): logger.log(u'Checking database structure...', logger.MESSAGE) connection.is_upgrading = False - connection.new_db = 0 == connection.checkDBVersion() - _processUpgrade(connection, schema) + connection.new_db = 0 == connection.check_db_version() + _process_upgrade(connection, schema) if connection.is_upgrading: connection.upgrade_log('Finished') -def prettyName(class_name): +def _pretty_name(class_name): # type: (AnyStr) -> AnyStr return ' '.join([x.group() for x in re.finditer('([A-Z])([a-z0-9]+)', class_name)]) -def restoreDatabase(filename, version): +def _restore_database(filename, version): logger.log(u'Restoring database before trying upgrade again') - if not sickgear.helpers.restore_versioned_file(dbFilename(filename=filename, suffix='v%s' % version), version): + if not sickgear.helpers.restore_versioned_file(db_filename(filename=filename, suffix='v%s' % version), version): logger.log_error_and_exit(u'Database restore failed, abort upgrading database') return False return True -def _processUpgrade(connection, upgrade_class): +def _process_upgrade(connection, upgrade_class): instance = upgrade_class(connection) - logger.log('Checking %s database upgrade' % prettyName(upgrade_class.__name__), logger.DEBUG) + logger.log('Checking %s database upgrade' % _pretty_name(upgrade_class.__name__), logger.DEBUG) if not instance.test(): connection.is_upgrading = True - 
connection.upgrade_log(getattr(upgrade_class, 'pretty_name', None) or prettyName(upgrade_class.__name__)) - logger.log('Database upgrade required: %s' % prettyName(upgrade_class.__name__), logger.MESSAGE) - db_version = connection.checkDBVersion() + connection.upgrade_log(getattr(upgrade_class, 'pretty_name', None) or _pretty_name(upgrade_class.__name__)) + logger.log('Database upgrade required: %s' % _pretty_name(upgrade_class.__name__), logger.MESSAGE) + db_version = connection.check_db_version() try: # only do backup if it's not a new db 0 < db_version and backup_database(connection, connection.filename, db_version) @@ -468,7 +464,7 @@ def _processUpgrade(connection, upgrade_class): # close db before attempting restore connection.close() - if restoreDatabase(connection.filename, db_version): + if _restore_database(connection.filename, db_version): logger.log_error_and_exit('Successfully restored database version: %s' % db_version) else: logger.log_error_and_exit('Failed to restore database version: %s' % db_version) @@ -480,7 +476,7 @@ def _processUpgrade(connection, upgrade_class): logger.log('%s upgrade not required' % upgrade_class.__name__, logger.DEBUG) for upgradeSubClass in upgrade_class.__subclasses__(): - _processUpgrade(connection, upgradeSubClass) + _process_upgrade(connection, upgradeSubClass) # Base migration class. All future DB changes should be subclassed from this class @@ -488,11 +484,11 @@ class SchemaUpgrade(object): def __init__(self, connection, **kwargs): self.connection = connection - def hasTable(self, table_name): + def has_table(self, table_name): return 0 < len(self.connection.select('SELECT 1 FROM sqlite_master WHERE name = ?;', (table_name,))) - def hasColumn(self, table_name, column): - return column in self.connection.tableInfo(table_name) + def has_column(self, table_name, column): + return column in self.connection.table_info(table_name) def list_tables(self): # type: (...) -> List[AnyStr] @@ -511,13 +507,13 @@ class SchemaUpgrade(object): ['index'])] # noinspection SqlResolve - def addColumn(self, table, column, data_type='NUMERIC', default=0, set_default=False): + def add_column(self, table, column, data_type='NUMERIC', default=0, set_default=False): self.connection.action('ALTER TABLE [%s] ADD %s %s%s' % (table, column, data_type, ('', ' DEFAULT "%s"' % default)[set_default])) self.connection.action('UPDATE [%s] SET %s = ?' 
% (table, column), (default,)) # noinspection SqlResolve - def addColumns(self, table, column_list=None): + def add_columns(self, table, column_list=None): # type: (AnyStr, List) -> None if isinstance(column_list, list): sql = [] @@ -535,25 +531,21 @@ class SchemaUpgrade(object): if sql: self.connection.mass_action(sql) - def dropColumn(self, table, columns): - # type: (AnyStr, AnyStr) -> None - self.drop_columns(table, columns) - def drop_columns(self, table, column): # type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> None # get old table columns and store the ones we want to keep result = self.connection.select('pragma table_info([%s])' % table) columns_list = ([column], column)[isinstance(column, list)] - keptColumns = list(filter(lambda col: col['name'] not in columns_list, result)) + kept_columns = list(filter(lambda col: col['name'] not in columns_list, result)) - keptColumnsNames = [] + kept_columns_names = [] final = [] pk = [] # copy the old table schema, column by column - for column in keptColumns: + for column in kept_columns: - keptColumnsNames.append(column['name']) + kept_columns_names.append(column['name']) cl = [column['name'], column['type']] @@ -574,7 +566,7 @@ class SchemaUpgrade(object): # join all the table column creation fields final = ', '.join(final) - keptColumnsNames = ', '.join(keptColumnsNames) + kept_columns_names = ', '.join(kept_columns_names) # generate sql for the new table creation if 0 == len(pk): @@ -586,12 +578,12 @@ class SchemaUpgrade(object): # create new temporary table and copy the old table data across, barring the removed column self.connection.action(sql) # noinspection SqlResolve - self.connection.action('INSERT INTO [%s_new] SELECT %s FROM [%s]' % (table, keptColumnsNames, table)) + self.connection.action('INSERT INTO [%s_new] SELECT %s FROM [%s]' % (table, kept_columns_names, table)) # copy the old indexes from the old table result = self.connection.select("SELECT sql FROM sqlite_master WHERE tbl_name=? AND type='index'", [table]) - # remove the old table and rename the new table to take it's place + # remove the old table and rename the new table to take its place # noinspection SqlResolve self.connection.action('DROP TABLE [%s]' % table) # noinspection SqlResolve @@ -605,22 +597,19 @@ class SchemaUpgrade(object): # vacuum the db as we will have a lot of space to reclaim after dropping tables self.connection.action('VACUUM') - def checkDBVersion(self): - return self.connection.checkDBVersion() + def call_check_db_version(self): + return self.connection.check_db_version() - def incDBVersion(self): - new_version = self.checkDBVersion() + 1 + def inc_db_version(self): + new_version = self.call_check_db_version() + 1 # noinspection SqlConstantCondition self.connection.action('UPDATE db_version SET db_version = ? WHERE 1=1', [new_version]) return new_version - def setDBVersion(self, new_version, check_db_version=True): + def set_db_version(self, new_version, check_db_version=True): # noinspection SqlConstantCondition self.connection.action('UPDATE db_version SET db_version = ? 
WHERE 1=1', [new_version]) - return check_db_version and self.checkDBVersion() - - def listTables(self): - return self.list_tables() + return check_db_version and self.call_check_db_version() def do_query(self, queries): if not isinstance(queries, list): @@ -630,23 +619,23 @@ class SchemaUpgrade(object): for query in queries: tbl_name = re.findall(r'(?i)DROP.*?TABLE.*?\[?([^\s\]]+)', query) - if tbl_name and not self.hasTable(tbl_name[0]): + if tbl_name and not self.has_table(tbl_name[0]): continue tbl_name = re.findall(r'(?i)CREATE.*?TABLE.*?\s([^\s(]+)\s*\(', query) - if tbl_name and self.hasTable(tbl_name[0]): + if tbl_name and self.has_table(tbl_name[0]): continue self.connection.action(query) def finish(self, tbl_dropped=False): if tbl_dropped: self.connection.action('VACUUM') - self.incDBVersion() + self.inc_db_version() def upgrade_log(self, *args, **kwargs): self.connection.upgrade_log(*args, **kwargs) -def MigrationCode(my_db): +def migration_code(my_db): schema = { 0: sickgear.mainDB.InitialSchema, 9: sickgear.mainDB.AddSizeAndSceneNameFields, @@ -719,7 +708,7 @@ def MigrationCode(my_db): # 20002: sickgear.mainDB.AddCoolSickGearFeature3, } - db_version = my_db.checkDBVersion() + db_version = my_db.check_db_version() my_db.new_db = 0 == db_version logger.log(u'Detected database version: v%s' % db_version, logger.DEBUG) @@ -746,7 +735,7 @@ def MigrationCode(my_db): my_db.close() logger.log(u'Failed to update database with error: %s attempting recovery...' % ex(e), logger.ERROR) - if restoreDatabase(my_db.filename, db_version): + if _restore_database(my_db.filename, db_version): # initialize the main SB database logger.log_error_and_exit(u'Successfully restored database version: %s' % db_version) else: @@ -777,7 +766,7 @@ def backup_database(db_connection, filename, version): return logger.log(u'Backing up database before upgrade') - if not sickgear.helpers.backup_versioned_file(dbFilename(filename), version): + if not sickgear.helpers.backup_versioned_file(db_filename(filename), version): logger.log_error_and_exit(u'Database backup failed, abort upgrading database') else: logger.log(u'Proceeding with upgrade') @@ -841,7 +830,7 @@ def backup_all_dbs(target, compress=True, prefer_7z=True): optional compress with zip or 7z (python 3 only, external lib py7zr required) 7z falls back to zip if py7zr is not available - :param target: target folder to backup to + :param target: target folder for backup db :param compress: compress db backups :param prefer_7z: prefer 7z compression if available :return: success, message diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py index a75600be..126cc747 100644 --- a/sickgear/providers/generic.py +++ b/sickgear/providers/generic.py @@ -193,7 +193,7 @@ class ProviderFailList(object): with self.lock: try: my_db = db.DBConnection('cache.db') - if my_db.hasTable('provider_fails'): + if my_db.has_table('provider_fails'): results = my_db.select('SELECT * FROM provider_fails WHERE prov_name = ?', [self.provider_name()]) self._fails = [] for r in results: @@ -210,7 +210,7 @@ class ProviderFailList(object): with self.lock: try: my_db = db.DBConnection('cache.db') - if my_db.hasTable('provider_fails'): + if my_db.has_table('provider_fails'): # noinspection PyCallByClass,PyTypeChecker time_limit = int(timestamp_near(datetime.datetime.now() - datetime.timedelta(days=28))) my_db.action('DELETE FROM provider_fails WHERE fail_time < ?', [time_limit]) @@ -281,7 +281,7 @@ class GenericProvider(object): def _load_fail_values(self): if 
hasattr(sickgear, 'DATA_DIR'): my_db = db.DBConnection('cache.db') - if my_db.hasTable('provider_fails_count'): + if my_db.has_table('provider_fails_count'): r = my_db.select('SELECT * FROM provider_fails_count WHERE prov_name = ?', [self.get_id()]) if r: self._failure_count = helpers.try_int(r[0]['failure_count'], 0) @@ -302,7 +302,7 @@ class GenericProvider(object): def _save_fail_value(self, field, value): my_db = db.DBConnection('cache.db') - if my_db.hasTable('provider_fails_count'): + if my_db.has_table('provider_fails_count'): r = my_db.action('UPDATE provider_fails_count SET %s = ? WHERE prov_name = ?' % field, [value, self.get_id()]) if 0 == r.rowcount: diff --git a/sickgear/tvcache.py b/sickgear/tvcache.py index 0cb50660..cdcb4b8a 100644 --- a/sickgear/tvcache.py +++ b/sickgear/tvcache.py @@ -42,7 +42,7 @@ class CacheDBConnection(db.DBConnection): # Create the table if it's not already there try: - if not self.hasTable('lastUpdate'): + if not self.has_table('lastUpdate'): self.action('CREATE TABLE lastUpdate (provider TEXT, time NUMERIC)') except (BaseException, Exception) as e: if ex(e) != 'table lastUpdate already exists': diff --git a/tests/migration_tests.py b/tests/migration_tests.py index 1682afdb..2a2d3a6c 100644 --- a/tests/migration_tests.py +++ b/tests/migration_tests.py @@ -48,7 +48,7 @@ class MigrationBasicTests(test.SickbeardTestDBCase): update.execute() sleep(0.1) - db.MigrationCode(my_db) + db.migration_code(my_db) my_db.close() # force python to garbage collect all db connections, so that the file can be deleted @@ -67,9 +67,9 @@ class MigrationBasicTests(test.SickbeardTestDBCase): # 0 -> 31 class OldInitialSchema(db.SchemaUpgrade): def execute(self): - db.backup_database(self.connection, 'sickbeard.db', self.checkDBVersion()) + db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - if not self.hasTable('tv_shows') and not self.hasTable('db_version'): + if not self.has_table('tv_shows') and not self.has_table('db_version'): queries = [ 'CREATE TABLE db_version (db_version INTEGER);', 'CREATE TABLE history (' @@ -105,7 +105,7 @@ class OldInitialSchema(db.SchemaUpgrade): self.connection.action(query) else: - cur_db_version = self.checkDBVersion() + cur_db_version = self.call_check_db_version() if cur_db_version < MIN_DB_VERSION: logger.log_error_and_exit( @@ -127,13 +127,13 @@ class OldInitialSchema(db.SchemaUpgrade): ' your database may be unusable due to their modifications.' 
) - return self.checkDBVersion() + return self.call_check_db_version() class AddDefaultEpStatusToTvShows(db.SchemaUpgrade): def execute(self): - self.addColumn('tv_shows', 'default_ep_status', 'TEXT', '') - self.setDBVersion(41, check_db_version=False) + self.add_column('tv_shows', 'default_ep_status', 'TEXT', '') + self.set_db_version(41, check_db_version=False) if '__main__' == __name__: diff --git a/tests/test_lib.py b/tests/test_lib.py index d8be545b..e3fe8be3 100644 --- a/tests/test_lib.py +++ b/tests/test_lib.py @@ -195,16 +195,16 @@ def setup_test_db(): """upgrades the db to the latest version """ # upgrading the db - db.MigrationCode(db.DBConnection()) + db.migration_code(db.DBConnection()) # fix up any db problems - db.sanityCheckDatabase(db.DBConnection(), mainDB.MainSanityCheck) + db.sanity_check_db(db.DBConnection(), mainDB.MainSanityCheck) # and for cachedb too - db.upgradeDatabase(db.DBConnection('cache.db'), cache_db.InitialSchema) + db.upgrade_database(db.DBConnection('cache.db'), cache_db.InitialSchema) # and for faileddb too - db.upgradeDatabase(db.DBConnection('failed.db'), failed_db.InitialSchema) + db.upgrade_database(db.DBConnection('failed.db'), failed_db.InitialSchema) def teardown_test_db(): From 99aa339bbfaa24f3d643fde36ce1992ae2bdd1ef Mon Sep 17 00:00:00 2001 From: JackDandy Date: Sun, 5 Mar 2023 22:52:09 +0000 Subject: [PATCH 07/21] Add logging around the restart/shutdown event. --- CHANGES.md | 4 ++++ sickgear.py | 1 + sickgear/__init__.py | 3 +++ sickgear/event_queue.py | 10 ++++++++++ 4 files changed, 18 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 81e9d159..dc0f5f5c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,6 +5,10 @@ * Change requirements for pure py3 * Change codebase cleanups +[develop changelog] + +* Add logging around the restart/shutdown event + ### 3.27.11 (2023-03-06 23:40:00 UTC) diff --git a/sickgear.py b/sickgear.py index d15ad247..4ccb25de 100755 --- a/sickgear.py +++ b/sickgear.py @@ -769,6 +769,7 @@ class SickGear(object): return False def shutdown(self, ev_type): + logger.debug(f'Shutdown ev_type:{ev_type}, sickgear.started:{sickgear.started}') if sickgear.started: # stop all tasks sickgear.halt() diff --git a/sickgear/__init__.py b/sickgear/__init__.py index 3d7be274..53d7e3b7 100644 --- a/sickgear/__init__.py +++ b/sickgear/__init__.py @@ -1747,6 +1747,7 @@ def restart(soft=True, update_pkg=None): if update_pkg: MY_ARGS.append('--update-pkg') + logger.log(u'Trigger event restart') events.put(events.SystemEvent.RESTART) else: @@ -1770,8 +1771,10 @@ def sig_handler(signum=None, _=None): def halt(): global __INITIALIZED__, started + logger.debug('Check INIT_LOCK on halt') with INIT_LOCK: + logger.debug(f'Check __INITIALIZED__ on halt: {__INITIALIZED__}') if __INITIALIZED__: logger.log('Exiting threads') diff --git a/sickgear/event_queue.py b/sickgear/event_queue.py index 2975c380..0c894e00 100644 --- a/sickgear/event_queue.py +++ b/sickgear/event_queue.py @@ -32,7 +32,17 @@ class Events(threading.Thread): try: # get event type etype = self.queue.get(True, 1) + except moves.queue.Empty: + etype = 'Empty' + except(BaseException, Exception): + etype = None + if etype in (self.SystemEvent.RESTART, self.SystemEvent.SHUTDOWN, None, 'Empty'): + if etype in ('Empty',): + continue + from sickgear import logger + logger.debug(f'Callback {self.callback.__name__}(event type:{etype})') + try: # perform callback if we got an event type self.callback(etype) From 9fe62aa486367da17375441dd52ba591cf58742b Mon Sep 17 00:00:00 2001 From: JackDandy 
Date: Mon, 6 Mar 2023 19:24:50 +0000 Subject: [PATCH 08/21] Change improve perf by using generators with `any`. --- CHANGES.md | 2 ++ gui/slick/interfaces/default/inc_top.tmpl | 2 +- lib/plex/plex.py | 2 +- sickgear/classes.py | 2 +- sickgear/common.py | 4 ++-- sickgear/providers/generic.py | 12 ++++++------ sickgear/providers/newznab.py | 12 ++++++------ sickgear/scene_exceptions.py | 4 ++-- sickgear/tv.py | 2 +- sickgear/webapi.py | 4 ++-- 10 files changed, 24 insertions(+), 22 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index dc0f5f5c..e5107f87 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,6 +4,8 @@ * Change remove calls to legacy py2 fix encoding function * Change requirements for pure py3 * Change codebase cleanups +* Change improve perf by using generators with `any` + [develop changelog] diff --git a/gui/slick/interfaces/default/inc_top.tmpl b/gui/slick/interfaces/default/inc_top.tmpl index 2416f49f..11f1e410 100644 --- a/gui/slick/interfaces/default/inc_top.tmpl +++ b/gui/slick/interfaces/default/inc_top.tmpl @@ -105,7 +105,7 @@ #except #pass #end try -#if not any([x in $body_attr for x in ['back-art', 'pro', 'ii']]) +#if not any(x in $body_attr for x in ['back-art', 'pro', 'ii']) #set $parts = $body_attr.split('class="') #set $body_attr = ('class="%s '.join($parts), $parts[0] + ' class="%s"')[1 == len($parts)] % {0: '', 1: 'pro', 2: 'pro ii'}.get(getattr($sickgear, 'DISPLAY_SHOW_VIEWMODE', 0)) #end if diff --git a/lib/plex/plex.py b/lib/plex/plex.py index 26f5436c..18996ce7 100644 --- a/lib/plex/plex.py +++ b/lib/plex/plex.py @@ -381,7 +381,7 @@ class Plex(object): section_path = re.sub(r'[/\\]+', '/', section.find('Location').get('path').lower()) section_path = re.sub(r'^(.{,2})[/\\]', '', section_path) - if not any([section_path in path for path in self.section_filter_path]): + if not any(section_path in path for path in self.section_filter_path): continue if section.get('key') not in self.ignore_sections \ diff --git a/sickgear/classes.py b/sickgear/classes.py index a10360d0..f6e5e564 100644 --- a/sickgear/classes.py +++ b/sickgear/classes.py @@ -195,7 +195,7 @@ class ShowInfoFilter(object): return isinstance(show_info, dict) \ and 'seriesname' in show_info \ and isinstance(show_info['seriesname'], string_types) \ - and any([x.search(show_info['seriesname']) for x in self.bad_names]) + and any(x.search(show_info['seriesname']) for x in self.bad_names) @staticmethod def _fix_firstaired(show_info): diff --git a/sickgear/common.py b/sickgear/common.py index ce713630..e66b946a 100644 --- a/sickgear/common.py +++ b/sickgear/common.py @@ -745,9 +745,9 @@ class NeededQualities(object): else: if not self.need_sd and min(wanted_qualities) <= NeededQualities.max_sd: self.need_sd = True - if not self.need_hd and any([i in NeededQualities.hd_qualities for i in wanted_qualities]): + if not self.need_hd and any(i in NeededQualities.hd_qualities for i in wanted_qualities): self.need_hd = True - if not self.need_webdl and any([i in NeededQualities.webdl_qualities for i in wanted_qualities]): + if not self.need_webdl and any(i in NeededQualities.webdl_qualities for i in wanted_qualities): self.need_webdl = True if not self.need_uhd and max(wanted_qualities) > NeededQualities.max_hd: self.need_uhd = True diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py index 126cc747..eca6d7a2 100644 --- a/sickgear/providers/generic.py +++ b/sickgear/providers/generic.py @@ -501,8 +501,8 @@ class GenericProvider(object): if time_left > datetime.timedelta(seconds=0): if 
log_warning: # Ensure provider name output (e.g. when displaying config/provs) instead of e.g. thread "Tornado" - prepend = ('[%s] :: ' % self.name, '')[any([x.name in threading.current_thread().name - for x in sickgear.providers.sorted_sources()])] + prepend = ('[%s] :: ' % self.name, '')[any(x.name in threading.current_thread().name + for x in sickgear.providers.sorted_sources())] logger.log('%sToo many requests reached at %s, waiting for %s' % ( prepend, self.fmt_delta(self.tmr_limit_time), self.fmt_delta(time_left)), logger.WARNING) return use_tmr_limit @@ -1009,9 +1009,9 @@ class GenericProvider(object): headers = [re.sub( r'\s+', '', - ((any([cell.get_text()]) and any([rc[x].search(cell.get_text()) for x in iterkeys(rc)]) and cell.get_text()) - or (cell.attrs.get('id') and any([rc[x].search(cell['id']) for x in iterkeys(rc)]) and cell['id']) - or (cell.attrs.get('title') and any([rc[x].search(cell['title']) for x in iterkeys(rc)]) and cell['title']) + ((any([cell.get_text()]) and any(rc[x].search(cell.get_text()) for x in iterkeys(rc)) and cell.get_text()) + or (cell.attrs.get('id') and any(rc[x].search(cell['id']) for x in iterkeys(rc)) and cell['id']) + or (cell.attrs.get('title') and any(rc[x].search(cell['title']) for x in iterkeys(rc)) and cell['title']) or next(iter(set(filter(lambda rz: any([rz]), [ next(iter(set(filter(lambda ry: any([ry]), [ cell.find(tag, **p) for p in [{attr: rc[x]} for x in iterkeys(rc)]]))), {}).get(attr) @@ -1932,7 +1932,7 @@ class TorrentProvider(GenericProvider): url_list = list(map(lambda u: '%s/' % u.rstrip('/'), url_list)) last_url, expire = sickgear.PROVIDER_HOMES.get(self.get_id(), ('', None)) url_drop = (url_exclude or []) + getattr(self, 'url_drop', []) - if url_drop and any([url in last_url for url in url_drop]): # deprecate url + if url_drop and any(url in last_url for url in url_drop): # deprecate url last_url = '' if 'site down' == last_url: diff --git a/sickgear/providers/newznab.py b/sickgear/providers/newznab.py index fc701941..2fe12d6a 100644 --- a/sickgear/providers/newznab.py +++ b/sickgear/providers/newznab.py @@ -911,9 +911,9 @@ class NewznabProvider(generic.NZBProvider): # category ids cat = [] if 'Episode' == mode or 'Season' == mode: - if not (any([x in params for x in - [v for c, v in iteritems(self.caps) - if c not in [NewznabConstants.SEARCH_EPISODE, NewznabConstants.SEARCH_SEASON]]])): + if not (any(x in params for x in + [v for c, v in iteritems(self.caps) + if c not in [NewznabConstants.SEARCH_EPISODE, NewznabConstants.SEARCH_SEASON]])): logger.log('Show is missing either an id or search term for search') continue @@ -938,7 +938,7 @@ class NewznabProvider(generic.NZBProvider): request_params = base_params.copy() # if ('Propers' == mode or 'nzbs_org' == self.get_id()) \ if 'Propers' == mode \ - and 'q' in params and not (any([x in params for x in ['season', 'ep']])): + and 'q' in params and not (any(x in params for x in ['season', 'ep'])): request_params['t'] = 'search' request_params.update(params) @@ -1048,10 +1048,10 @@ class NewznabProvider(generic.NZBProvider): if exit_log: self._log_search(mode, len(results), search_url) - if not try_all_searches and any([x in request_params for x in [ + if not try_all_searches and any(x in request_params for x in [ v for c, v in iteritems(self.caps) if c not in [NewznabConstants.SEARCH_EPISODE, NewznabConstants.SEARCH_SEASON, - NewznabConstants.SEARCH_TEXT]]]) and len(results): + NewznabConstants.SEARCH_TEXT]]) and len(results): break return results, n_spaces diff --git 
a/sickgear/scene_exceptions.py b/sickgear/scene_exceptions.py index b7ee204a..ceaa42d4 100644 --- a/sickgear/scene_exceptions.py +++ b/sickgear/scene_exceptions.py @@ -583,6 +583,6 @@ def has_abs_episodes(ep_obj=None, name=None): :return: :rtype: bool """ - return any([(name or ep_obj.show_obj.name or '').lower().startswith(x.lower()) for x in [ + return any((name or ep_obj.show_obj.name or '').lower().startswith(x.lower()) for x in [ 'The Eighties', 'The Making of the Mob', 'The Night Of', 'Roots 2016', 'Trepalium' - ]]) + ]) diff --git a/sickgear/tv.py b/sickgear/tv.py index 8a6c274f..23641792 100644 --- a/sickgear/tv.py +++ b/sickgear/tv.py @@ -3042,7 +3042,7 @@ class TVShow(TVShowBase): page_url = 'https://www.imdb.com/title/{0}/'.format(imdb_id) try: response = requests.head(page_url, allow_redirects=True) - if response.history and any([h for h in response.history if 301 == h.status_code]): + if response.history and any(h for h in response.history if 301 == h.status_code): return helpers.parse_imdb_id(response.url) except (BaseException, Exception): pass diff --git a/sickgear/webapi.py b/sickgear/webapi.py index 40246086..b91e1625 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -600,7 +600,7 @@ class ApiCall(object): elif isinstance(value, string_types): if '|' in value: li = [int(v) for v in value.split('|')] - if any([not isinstance(v, integer_types) for v in li]): + if any(not isinstance(v, integer_types) for v in li): error = True else: value = li @@ -610,7 +610,7 @@ class ApiCall(object): error = True else: li = value.split('|') - if any([sub_type is not type(v) for v in li]): + if any(sub_type is not type(v) for v in li): error = True else: value = li From e08baa4f0b8d44ce6b548ac13a379a310195fe52 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Mon, 6 Mar 2023 19:54:59 +0000 Subject: [PATCH 09/21] =?UTF-8?q?Update=20html5lib=201.1=20(f87487a)=20?= =?UTF-8?q?=E2=86=92=201.2-dev=20(3e500bb).?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGES.md | 1 + lib/html5lib/__init__.py | 2 +- lib/html5lib/_ihatexml.py | 5 +---- lib/html5lib/_inputstream.py | 2 +- lib/html5lib/constants.py | 36 +++++++++++++++++++++--------- lib/html5lib/filters/sanitizer.py | 8 +++++++ lib/html5lib/html5parser.py | 9 +++++--- lib/html5lib/serializer.py | 4 ++-- lib/html5lib/treebuilders/base.py | 1 + lib/html5lib/treebuilders/etree.py | 4 ++-- lib/html5lib/treewalkers/etree.py | 2 +- 11 files changed, 49 insertions(+), 25 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index e5107f87..75888e5b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,6 @@ ### 3.28.0 (2023-xx-xx xx:xx:00 UTC) +* Update html5lib 1.1 (f87487a) to 1.2-dev (3e500bb) * Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) * Change remove calls to legacy py2 fix encoding function * Change requirements for pure py3 diff --git a/lib/html5lib/__init__.py b/lib/html5lib/__init__.py index 320e0c3b..7b854f99 100644 --- a/lib/html5lib/__init__.py +++ b/lib/html5lib/__init__.py @@ -32,4 +32,4 @@ __all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", # this has to be at the top level, see how setup.py parses this #: Distribution version number. 
-__version__ = "1.1" +__version__ = "1.2-dev" diff --git a/lib/html5lib/_ihatexml.py b/lib/html5lib/_ihatexml.py index 3ff803c1..d725eabd 100644 --- a/lib/html5lib/_ihatexml.py +++ b/lib/html5lib/_ihatexml.py @@ -104,18 +104,15 @@ def charStringToList(chars): charRanges = [item.strip() for item in chars.split(" | ")] rv = [] for item in charRanges: - foundMatch = False for regexp in (reChar, reCharRange): match = regexp.match(item) if match is not None: rv.append([hexToInt(item) for item in match.groups()]) if len(rv[-1]) == 1: rv[-1] = rv[-1] * 2 - foundMatch = True break - if not foundMatch: + else: assert len(item) == 1 - rv.append([ord(item)] * 2) rv = normaliseCharList(rv) return rv diff --git a/lib/html5lib/_inputstream.py b/lib/html5lib/_inputstream.py index 0207dd21..a93b5a4e 100644 --- a/lib/html5lib/_inputstream.py +++ b/lib/html5lib/_inputstream.py @@ -324,7 +324,7 @@ class HTMLUnicodeInputStream(object): except KeyError: if __debug__: for c in characters: - assert(ord(c) < 128) + assert ord(c) < 128 regex = "".join(["\\x%02x" % ord(c) for c in characters]) if not opposite: regex = "^%s" % regex diff --git a/lib/html5lib/constants.py b/lib/html5lib/constants.py index fe3e237c..2fa4146d 100644 --- a/lib/html5lib/constants.py +++ b/lib/html5lib/constants.py @@ -557,23 +557,36 @@ headingElements = ( ) voidElements = frozenset([ + "area", "base", - "command", - "event-source", + "br", + "col", + "command", # removed ^1 + "embed", + "event-source", # renamed and later removed ^2 + "hr", + "img", + "input", "link", "meta", - "hr", - "br", - "img", - "embed", - "param", - "area", - "col", - "input", + "param", # deprecated ^3 "source", - "track" + "track", + "wbr", ]) +# Removals and deprecations in the HTML 5 spec: +# ^1: command +# http://lists.whatwg.org/pipermail/whatwg-whatwg.org/2012-December/038472.html +# https://github.com/whatwg/html/commit/9e2e25f4ae90969a7c64e0763c98548a35b50af8 +# ^2: event-source +# renamed to eventsource in 7/2008: +# https://github.com/whatwg/html/commit/d157945d0285b4463a04b57318da0c4b300a99e7 +# removed entirely in 2/2009: +# https://github.com/whatwg/html/commit/43cbdbfbb7eb74b0d65e0f4caab2020c0b2a16ff +# ^3: param +# https://developer.mozilla.org/en-US/docs/Web/HTML/Element/param + cdataElements = frozenset(['title', 'textarea']) rcdataElements = frozenset([ @@ -604,6 +617,7 @@ booleanAttributes = { "button": frozenset(["disabled", "autofocus"]), "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]), "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]), + "ol": frozenset(["reversed"]), "output": frozenset(["disabled", "readonly"]), "iframe": frozenset(["seamless"]), } diff --git a/lib/html5lib/filters/sanitizer.py b/lib/html5lib/filters/sanitizer.py index 70ef9066..ea2c5dd3 100644 --- a/lib/html5lib/filters/sanitizer.py +++ b/lib/html5lib/filters/sanitizer.py @@ -113,6 +113,7 @@ allowed_elements = frozenset(( (namespaces['html'], 'strike'), (namespaces['html'], 'strong'), (namespaces['html'], 'sub'), + (namespaces['html'], 'summary'), (namespaces['html'], 'sup'), (namespaces['html'], 'table'), (namespaces['html'], 'tbody'), @@ -128,6 +129,7 @@ allowed_elements = frozenset(( (namespaces['html'], 'ul'), (namespaces['html'], 'var'), (namespaces['html'], 'video'), + (namespaces['html'], 'wbr'), (namespaces['mathml'], 'maction'), (namespaces['mathml'], 'math'), (namespaces['mathml'], 'merror'), @@ -363,6 +365,7 @@ allowed_attributes = frozenset(( (None, 'maxsize'), (None, 'minsize'), (None, 'other'), 
+ (None, 'reversed'), (None, 'rowalign'), (None, 'rowalign'), (None, 'rowalign'), @@ -373,6 +376,7 @@ allowed_attributes = frozenset(( (None, 'scriptlevel'), (None, 'selection'), (None, 'separator'), + (None, 'start'), (None, 'stretchy'), (None, 'width'), (None, 'width'), @@ -594,6 +598,10 @@ allowed_css_properties = frozenset(( 'height', 'letter-spacing', 'line-height', + 'max-height', + 'min-height', + 'max-width', + 'min-width', 'overflow', 'pause', 'pause-after', diff --git a/lib/html5lib/html5parser.py b/lib/html5lib/html5parser.py index 74d829d9..4c2d4c75 100644 --- a/lib/html5lib/html5parser.py +++ b/lib/html5lib/html5parser.py @@ -115,6 +115,9 @@ class HTMLParser(object): if tree is None: tree = treebuilders.getTreeBuilder("etree") + elif isinstance(tree, str): + tree = treebuilders.getTreeBuilder(tree) + self.tree = tree(namespaceHTMLElements) self.errors = [] @@ -1002,8 +1005,8 @@ def getPhases(debug): self.tree.insertText(token["data"]) # This must be bad for performance if (self.parser.framesetOK and - any([char not in spaceCharacters - for char in token["data"]])): + any(char not in spaceCharacters + for char in token["data"])): self.parser.framesetOK = False def processSpaceCharactersNonPre(self, token): @@ -1850,7 +1853,7 @@ def getPhases(debug): def flushCharacters(self): data = "".join([item["data"] for item in self.characterTokens]) - if any([item not in spaceCharacters for item in data]): + if any(item not in spaceCharacters for item in data): token = {"type": tokenTypes["Characters"], "data": data} self.parser.phases["inTable"].insertText(token) elif data: diff --git a/lib/html5lib/serializer.py b/lib/html5lib/serializer.py index c66df683..a171ac1c 100644 --- a/lib/html5lib/serializer.py +++ b/lib/html5lib/serializer.py @@ -222,14 +222,14 @@ class HTMLSerializer(object): self.strict = False def encode(self, string): - assert(isinstance(string, text_type)) + assert isinstance(string, text_type) if self.encoding: return string.encode(self.encoding, "htmlentityreplace") else: return string def encodeStrict(self, string): - assert(isinstance(string, text_type)) + assert isinstance(string, text_type) if self.encoding: return string.encode(self.encoding, "strict") else: diff --git a/lib/html5lib/treebuilders/base.py b/lib/html5lib/treebuilders/base.py index e4a3d710..020d7e15 100644 --- a/lib/html5lib/treebuilders/base.py +++ b/lib/html5lib/treebuilders/base.py @@ -121,6 +121,7 @@ class Node(object): class ActiveFormattingElements(list): def append(self, node): + """Append node to the end of the list.""" equalCount = 0 if node != Marker: for element in self[::-1]: diff --git a/lib/html5lib/treebuilders/etree.py b/lib/html5lib/treebuilders/etree.py index 086bed4e..0b745081 100644 --- a/lib/html5lib/treebuilders/etree.py +++ b/lib/html5lib/treebuilders/etree.py @@ -108,7 +108,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False): node.parent = None def insertText(self, data, insertBefore=None): - if not(len(self._element)): + if not len(self._element): if not self._element.text: self._element.text = "" self._element.text += data @@ -201,7 +201,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False): rv = [] def serializeElement(element, indent=0): - if not(hasattr(element, "tag")): + if not hasattr(element, "tag"): element = element.getroot() if element.tag == "": if element.get("publicId") or element.get("systemId"): diff --git a/lib/html5lib/treewalkers/etree.py b/lib/html5lib/treewalkers/etree.py index 44653372..411a1d45 100644 --- 
a/lib/html5lib/treewalkers/etree.py +++ b/lib/html5lib/treewalkers/etree.py @@ -37,7 +37,7 @@ def getETreeBuilder(ElementTreeImplementation): else: node = elt - if not(hasattr(node, "tag")): + if not hasattr(node, "tag"): node = node.getroot() if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"): From 664d9b01293a4f0fdfa9cd9f2c08e24941c96ca3 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Tue, 7 Mar 2023 00:57:33 +0000 Subject: [PATCH 10/21] =?UTF-8?q?Update=20urllib3=201.26.13=20(25fbd5f)=20?= =?UTF-8?q?=E2=86=92=201.26.14=20(a06c05c)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGES.md | 1 + lib/urllib3/_version.py | 2 +- lib/urllib3/connection.py | 5 +++++ lib/urllib3/connectionpool.py | 2 +- lib/urllib3/util/timeout.py | 9 ++++++--- lib/urllib3/util/url.py | 4 ++-- 6 files changed, 16 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 75888e5b..f191b955 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,6 +2,7 @@ * Update html5lib 1.1 (f87487a) to 1.2-dev (3e500bb) * Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) +* Update urllib3 1.26.13 (25fbd5f) to 1.26.14 (a06c05c) * Change remove calls to legacy py2 fix encoding function * Change requirements for pure py3 * Change codebase cleanups diff --git a/lib/urllib3/_version.py b/lib/urllib3/_version.py index 308d7f28..7c031661 100644 --- a/lib/urllib3/_version.py +++ b/lib/urllib3/_version.py @@ -1,2 +1,2 @@ # This file is protected via CODEOWNERS -__version__ = "1.26.13" +__version__ = "1.26.14" diff --git a/lib/urllib3/connection.py b/lib/urllib3/connection.py index 10fb36c4..54b96b19 100644 --- a/lib/urllib3/connection.py +++ b/lib/urllib3/connection.py @@ -229,6 +229,11 @@ class HTTPConnection(_HTTPConnection, object): ) def request(self, method, url, body=None, headers=None): + # Update the inner socket's timeout value to send the request. + # This only triggers if the connection is re-used. + if getattr(self, "sock", None) is not None: + self.sock.settimeout(self.timeout) + if headers is None: headers = {} else: diff --git a/lib/urllib3/connectionpool.py b/lib/urllib3/connectionpool.py index 70873927..c23d736b 100644 --- a/lib/urllib3/connectionpool.py +++ b/lib/urllib3/connectionpool.py @@ -379,7 +379,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() - conn.timeout = timeout_obj.connect_timeout + conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout) # Trigger any extra validation we need to do. try: diff --git a/lib/urllib3/util/timeout.py b/lib/urllib3/util/timeout.py index ff69593b..78e18a62 100644 --- a/lib/urllib3/util/timeout.py +++ b/lib/urllib3/util/timeout.py @@ -2,9 +2,8 @@ from __future__ import absolute_import import time -# The default socket timeout, used by httplib to indicate that no timeout was -# specified by the user -from socket import _GLOBAL_DEFAULT_TIMEOUT +# The default socket timeout, used by httplib to indicate that no timeout was; specified by the user +from socket import _GLOBAL_DEFAULT_TIMEOUT, getdefaulttimeout from ..exceptions import TimeoutStateError @@ -116,6 +115,10 @@ class Timeout(object): # __str__ provided for backwards compatibility __str__ = __repr__ + @classmethod + def resolve_default_timeout(cls, timeout): + return getdefaulttimeout() if timeout is cls.DEFAULT_TIMEOUT else timeout + @classmethod def _validate_timeout(cls, value, name): """Check that a timeout attribute is valid. 
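The `Timeout.resolve_default_timeout` helper added above bridges a sentinel and the socket API: `socket.settimeout()` accepts only a non-negative number or `None`, while urllib3 marks "no explicit timeout" with the `_GLOBAL_DEFAULT_TIMEOUT` sentinel, which the reused-connection path in `connection.py` would otherwise hand to the socket unresolved. A minimal sketch of that resolution pattern, with illustrative names rather than the library's own code:

    import socket

    _NOT_SET = object()  # stand-in for a library-level "no timeout given" sentinel

    def resolve_timeout(value):
        # swap the sentinel for the process-wide default just before it reaches
        # socket.settimeout(), which only accepts a non-negative float or None
        return socket.getdefaulttimeout() if value is _NOT_SET else value

    sock = socket.socket()
    sock.settimeout(resolve_timeout(_NOT_SET))  # falls back to the global default
    sock.settimeout(resolve_timeout(2.5))       # explicit per-connection timeout
    sock.close()
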
diff --git a/lib/urllib3/util/url.py b/lib/urllib3/util/url.py index 3a169a43..e5682d3b 100644 --- a/lib/urllib3/util/url.py +++ b/lib/urllib3/util/url.py @@ -50,7 +50,7 @@ _variations = [ "(?:(?:%(hex)s:){0,6}%(hex)s)?::", ] -UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._\-~" IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" @@ -303,7 +303,7 @@ def _normalize_host(host, scheme): def _idna_encode(name): - if name and any([ord(x) > 128 for x in name]): + if name and any(ord(x) >= 128 for x in name): try: import idna except ImportError: From 71a0203c024187aa60a7325f7ef30b368d60b6b3 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Tue, 7 Mar 2023 03:04:03 +0000 Subject: [PATCH 11/21] =?UTF-8?q?Update=20package=20resource=20API=2067.3.?= =?UTF-8?q?2=20(b9bf2ec)=20=E2=86=92=2067.5.1=20(f51eccd).?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGES.md | 3 +- lib/pkg_resources/__init__.py | 76 ++++++++- .../_vendor/importlib_resources/_common.py | 147 +++++++++++++++--- .../_vendor/importlib_resources/_compat.py | 10 ++ .../_vendor/importlib_resources/_legacy.py | 3 +- .../_vendor/importlib_resources/abc.py | 65 ++++++-- .../_vendor/importlib_resources/readers.py | 16 +- .../_vendor/importlib_resources/simple.py | 70 ++++----- lib/pkg_resources/_vendor/vendored.txt | 2 +- 9 files changed, 296 insertions(+), 96 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f191b955..99d74bb7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ ### 3.28.0 (2023-xx-xx xx:xx:00 UTC) * Update html5lib 1.1 (f87487a) to 1.2-dev (3e500bb) -* Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) +* Update package resource API 63.2.0 (3ae44cd) to 67.5.1 (f51eccd) * Update urllib3 1.26.13 (25fbd5f) to 1.26.14 (a06c05c) * Change remove calls to legacy py2 fix encoding function * Change requirements for pure py3 @@ -12,6 +12,7 @@ [develop changelog] * Add logging around the restart/shutdown event +* Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) ### 3.27.11 (2023-03-06 23:40:00 UTC) diff --git a/lib/pkg_resources/__init__.py b/lib/pkg_resources/__init__.py index 1eb3f9e2..a73a1df3 100644 --- a/lib/pkg_resources/__init__.py +++ b/lib/pkg_resources/__init__.py @@ -12,6 +12,12 @@ The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. + +This module is deprecated. Users are directed to +`importlib.resources `_ +and +`importlib.metadata `_ +instead. 
""" import sys @@ -112,6 +118,12 @@ _namespace_handlers = None _namespace_packages = None +warnings.warn("pkg_resources is deprecated as an API", DeprecationWarning) + + +_PEP440_FALLBACK = re.compile(r"^v?(?P(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I) + + class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with @@ -914,9 +926,7 @@ class WorkingSet: list(map(shadow_set.add, self)) for project_name in plugin_projects: - for dist in plugin_env[project_name]: - req = [dist.as_requirement()] try: @@ -1389,6 +1399,38 @@ def safe_version(version): return re.sub('[^A-Za-z0-9.]+', '-', version) +def _forgiving_version(version): + """Fallback when ``safe_version`` is not safe enough + >>> parse_version(_forgiving_version('0.23ubuntu1')) + + >>> parse_version(_forgiving_version('0.23-')) + + >>> parse_version(_forgiving_version('0.-_')) + + >>> parse_version(_forgiving_version('42.+?1')) + + >>> parse_version(_forgiving_version('hello world')) + + """ + version = version.replace(' ', '.') + match = _PEP440_FALLBACK.search(version) + if match: + safe = match["safe"] + rest = version[len(safe):] + else: + safe = "0" + rest = version + local = f"sanitized.{_safe_segment(rest)}".strip(".") + return f"{safe}.dev0+{local}" + + +def _safe_segment(segment): + """Convert an arbitrary string into a safe segment""" + segment = re.sub('[^A-Za-z0-9.]+', '-', segment) + segment = re.sub('-[^A-Za-z0-9]+', '-', segment) + return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-") + + def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name @@ -1822,7 +1864,6 @@ class ZipProvider(EggProvider): # FIXME: 'ZipProvider._extract_resource' is too complex (12) def _extract_resource(self, manager, zip_path): # noqa: C901 - if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource(manager, os.path.join(zip_path, name)) @@ -1836,7 +1877,6 @@ class ZipProvider(EggProvider): '"os.rename" and "os.unlink" are not supported ' 'on this platform' ) try: - real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path)) if self._is_current(real_path, zip_path): @@ -2637,7 +2677,7 @@ class Distribution: @property def hashcmp(self): return ( - self.parsed_version, + self._forgiving_parsed_version, self.precedence, self.key, self.location, @@ -2695,6 +2735,32 @@ class Distribution: return self._parsed_version + @property + def _forgiving_parsed_version(self): + try: + return self.parsed_version + except packaging.version.InvalidVersion as ex: + self._parsed_version = parse_version(_forgiving_version(self.version)) + + notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678 + msg = f"""!!\n\n + ************************************************************************* + {str(ex)}\n{notes} + + This is a long overdue deprecation. + For the time being, `pkg_resources` will use `{self._parsed_version}` + as a replacement to avoid breaking existing environments, + but no future compatibility is guaranteed. + + If you maintain package {self.project_name} you should implement + the relevant changes to adequate the project to PEP 440 immediately. + ************************************************************************* + \n\n!! 
+ """ + warnings.warn(msg, DeprecationWarning) + + return self._parsed_version + @property def version(self): try: diff --git a/lib/pkg_resources/_vendor/importlib_resources/_common.py b/lib/pkg_resources/_vendor/importlib_resources/_common.py index a12e2c75..3c6de1cf 100644 --- a/lib/pkg_resources/_vendor/importlib_resources/_common.py +++ b/lib/pkg_resources/_vendor/importlib_resources/_common.py @@ -5,25 +5,58 @@ import functools import contextlib import types import importlib +import inspect +import warnings +import itertools -from typing import Union, Optional +from typing import Union, Optional, cast from .abc import ResourceReader, Traversable from ._compat import wrap_spec Package = Union[types.ModuleType, str] +Anchor = Package -def files(package): - # type: (Package) -> Traversable +def package_to_anchor(func): """ - Get a Traversable resource from a package + Replace 'package' parameter as 'anchor' and warn about the change. + + Other errors should fall through. + + >>> files('a', 'b') + Traceback (most recent call last): + TypeError: files() takes from 0 to 1 positional arguments but 2 were given """ - return from_package(get_package(package)) + undefined = object() + + @functools.wraps(func) + def wrapper(anchor=undefined, package=undefined): + if package is not undefined: + if anchor is not undefined: + return func(anchor, package) + warnings.warn( + "First parameter to files is renamed to 'anchor'", + DeprecationWarning, + stacklevel=2, + ) + return func(package) + elif anchor is undefined: + return func() + return func(anchor) + + return wrapper -def get_resource_reader(package): - # type: (types.ModuleType) -> Optional[ResourceReader] +@package_to_anchor +def files(anchor: Optional[Anchor] = None) -> Traversable: + """ + Get a Traversable resource for an anchor. + """ + return from_package(resolve(anchor)) + + +def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]: """ Return the package's loader if it's a ResourceReader. """ @@ -39,24 +72,39 @@ def get_resource_reader(package): return reader(spec.name) # type: ignore -def resolve(cand): - # type: (Package) -> types.ModuleType - return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand) +@functools.singledispatch +def resolve(cand: Optional[Anchor]) -> types.ModuleType: + return cast(types.ModuleType, cand) -def get_package(package): - # type: (Package) -> types.ModuleType - """Take a package name or module object and return the module. +@resolve.register +def _(cand: str) -> types.ModuleType: + return importlib.import_module(cand) - Raise an exception if the resolved module is not a package. + +@resolve.register +def _(cand: None) -> types.ModuleType: + return resolve(_infer_caller().f_globals['__name__']) + + +def _infer_caller(): """ - resolved = resolve(package) - if wrap_spec(resolved).submodule_search_locations is None: - raise TypeError(f'{package!r} is not a package') - return resolved + Walk the stack and find the frame of the first caller not in this module. + """ + + def is_this_file(frame_info): + return frame_info.filename == __file__ + + def is_wrapper(frame_info): + return frame_info.function == 'wrapper' + + not_this_file = itertools.filterfalse(is_this_file, inspect.stack()) + # also exclude 'wrapper' due to singledispatch in the call stack + callers = itertools.filterfalse(is_wrapper, not_this_file) + return next(callers).frame -def from_package(package): +def from_package(package: types.ModuleType): """ Return a Traversable object for the given package. 
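The deprecation notice added to `pkg_resources` above directs users to `importlib.resources`, and the `files()`/`Traversable` API in this vendored module is the modern replacement for the classic helpers. A minimal usage sketch on Python 3.9+ (the package and resource names are hypothetical):

    from importlib.resources import files, as_file

    # read a packaged data file; roughly replaces pkg_resources.resource_string()
    text = files("mypkg").joinpath("data/config.json").read_text(encoding="utf-8")

    # expose a real filesystem path for code that needs one;
    # roughly replaces pkg_resources.resource_filename()
    with as_file(files("mypkg") / "data" / "config.json") as path:
        print(path)
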
@@ -67,7 +115,14 @@ def from_package(package): @contextlib.contextmanager -def _tempfile(reader, suffix=''): +def _tempfile( + reader, + suffix='', + # gh-93353: Keep a reference to call os.remove() in late Python + # finalization. + *, + _os_remove=os.remove, +): # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' # blocks due to the need to close the temporary file to work on Windows # properly. @@ -81,18 +136,35 @@ def _tempfile(reader, suffix=''): yield pathlib.Path(raw_path) finally: try: - os.remove(raw_path) + _os_remove(raw_path) except FileNotFoundError: pass +def _temp_file(path): + return _tempfile(path.read_bytes, suffix=path.name) + + +def _is_present_dir(path: Traversable) -> bool: + """ + Some Traversables implement ``is_dir()`` to raise an + exception (i.e. ``FileNotFoundError``) when the + directory doesn't exist. This function wraps that call + to always return a boolean and only return True + if there's a dir and it exists. + """ + with contextlib.suppress(FileNotFoundError): + return path.is_dir() + return False + + @functools.singledispatch def as_file(path): """ Given a Traversable object, return that object as a path on the local file system in a context manager. """ - return _tempfile(path.read_bytes, suffix=path.name) + return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) @as_file.register(pathlib.Path) @@ -102,3 +174,34 @@ def _(path): Degenerate behavior for pathlib.Path objects. """ yield path + + +@contextlib.contextmanager +def _temp_path(dir: tempfile.TemporaryDirectory): + """ + Wrap tempfile.TemporyDirectory to return a pathlib object. + """ + with dir as result: + yield pathlib.Path(result) + + +@contextlib.contextmanager +def _temp_dir(path): + """ + Given a traversable dir, recursively replicate the whole tree + to the file system in a context manager. + """ + assert path.is_dir() + with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: + yield _write_contents(temp_dir, path) + + +def _write_contents(target, source): + child = target.joinpath(source.name) + if source.is_dir(): + child.mkdir() + for item in source.iterdir(): + _write_contents(child, item) + else: + child.write_bytes(source.read_bytes()) + return child diff --git a/lib/pkg_resources/_vendor/importlib_resources/_compat.py b/lib/pkg_resources/_vendor/importlib_resources/_compat.py index cb9fc820..8b5b1d28 100644 --- a/lib/pkg_resources/_vendor/importlib_resources/_compat.py +++ b/lib/pkg_resources/_vendor/importlib_resources/_compat.py @@ -1,9 +1,12 @@ # flake8: noqa import abc +import os import sys import pathlib from contextlib import suppress +from typing import Union + if sys.version_info >= (3, 10): from zipfile import Path as ZipPath # type: ignore @@ -96,3 +99,10 @@ def wrap_spec(package): from . import _adapters return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) + + +if sys.version_info >= (3, 9): + StrPath = Union[str, os.PathLike[str]] +else: + # PathLike is only subscriptable at runtime in 3.9+ + StrPath = Union[str, "os.PathLike[str]"] diff --git a/lib/pkg_resources/_vendor/importlib_resources/_legacy.py b/lib/pkg_resources/_vendor/importlib_resources/_legacy.py index 1d5d3f1f..b1ea8105 100644 --- a/lib/pkg_resources/_vendor/importlib_resources/_legacy.py +++ b/lib/pkg_resources/_vendor/importlib_resources/_legacy.py @@ -27,8 +27,7 @@ def deprecated(func): return wrapper -def normalize_path(path): - # type: (Any) -> str +def normalize_path(path: Any) -> str: """Normalize a path by ensuring it is a string. 
If the resulting string contains path separators, an exception is raised. diff --git a/lib/pkg_resources/_vendor/importlib_resources/abc.py b/lib/pkg_resources/_vendor/importlib_resources/abc.py index d39dc1ad..23b6aeaf 100644 --- a/lib/pkg_resources/_vendor/importlib_resources/abc.py +++ b/lib/pkg_resources/_vendor/importlib_resources/abc.py @@ -1,7 +1,13 @@ import abc -from typing import BinaryIO, Iterable, Text +import io +import itertools +import pathlib +from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional -from ._compat import runtime_checkable, Protocol +from ._compat import runtime_checkable, Protocol, StrPath + + +__all__ = ["ResourceReader", "Traversable", "TraversableResources"] class ResourceReader(metaclass=abc.ABCMeta): @@ -46,27 +52,34 @@ class ResourceReader(metaclass=abc.ABCMeta): raise FileNotFoundError +class TraversalError(Exception): + pass + + @runtime_checkable class Traversable(Protocol): """ An object with a subset of pathlib.Path methods suitable for traversing directories and opening files. + + Any exceptions that occur when accessing the backing resource + may propagate unaltered. """ @abc.abstractmethod - def iterdir(self): + def iterdir(self) -> Iterator["Traversable"]: """ Yield Traversable objects in self """ - def read_bytes(self): + def read_bytes(self) -> bytes: """ Read contents of self as bytes """ with self.open('rb') as strm: return strm.read() - def read_text(self, encoding=None): + def read_text(self, encoding: Optional[str] = None) -> str: """ Read contents of self as text """ @@ -85,13 +98,32 @@ class Traversable(Protocol): Return True if self is a file """ - @abc.abstractmethod - def joinpath(self, child): - """ - Return Traversable child in self + def joinpath(self, *descendants: StrPath) -> "Traversable": """ + Return Traversable resolved with any descendants applied. - def __truediv__(self, child): + Each descendant should be a path segment relative to self + and each may contain multiple levels separated by + ``posixpath.sep`` (``/``). + """ + if not descendants: + return self + names = itertools.chain.from_iterable( + path.parts for path in map(pathlib.PurePosixPath, descendants) + ) + target = next(names) + matches = ( + traversable for traversable in self.iterdir() if traversable.name == target + ) + try: + match = next(matches) + except StopIteration: + raise TraversalError( + "Target not found during traversal.", target, list(names) + ) + return match.joinpath(*names) + + def __truediv__(self, child: StrPath) -> "Traversable": """ Return Traversable child in self """ @@ -107,7 +139,8 @@ class Traversable(Protocol): accepted by io.TextIOWrapper. """ - @abc.abstractproperty + @property + @abc.abstractmethod def name(self) -> str: """ The base name of this object without any parent references. 
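The reworked `Traversable.joinpath` above accepts several descendants in one call, each of which may itself contain `/`-separated parts; the default implementation walks `iterdir()` and raises `TraversalError` when a segment cannot be found. A small sketch of the equivalent spellings, assuming the standalone `importlib_resources` backport (>= 5.10) and a hypothetical package:

    from importlib_resources import files

    pkg = files("mypkg")
    a = pkg.joinpath("templates", "email/welcome.txt")  # several descendants at once
    b = pkg.joinpath("templates/email/welcome.txt")     # one multi-level segment
    c = pkg / "templates" / "email" / "welcome.txt"     # __truediv__ defers to joinpath
    # all three resolve the same resource: templates/email/welcome.txt
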
@@ -121,17 +154,17 @@ class TraversableResources(ResourceReader): """ @abc.abstractmethod - def files(self): + def files(self) -> "Traversable": """Return a Traversable object for the loaded package.""" - def open_resource(self, resource): + def open_resource(self, resource: StrPath) -> io.BufferedReader: return self.files().joinpath(resource).open('rb') - def resource_path(self, resource): + def resource_path(self, resource: Any) -> NoReturn: raise FileNotFoundError(resource) - def is_resource(self, path): + def is_resource(self, path: StrPath) -> bool: return self.files().joinpath(path).is_file() - def contents(self): + def contents(self) -> Iterator[str]: return (item.name for item in self.files().iterdir()) diff --git a/lib/pkg_resources/_vendor/importlib_resources/readers.py b/lib/pkg_resources/_vendor/importlib_resources/readers.py index f1190ca4..ab34db74 100644 --- a/lib/pkg_resources/_vendor/importlib_resources/readers.py +++ b/lib/pkg_resources/_vendor/importlib_resources/readers.py @@ -82,15 +82,13 @@ class MultiplexedPath(abc.Traversable): def is_file(self): return False - def joinpath(self, child): - # first try to find child in current paths - for file in self.iterdir(): - if file.name == child: - return file - # if it does not exist, construct it with the first path - return self._paths[0] / child - - __truediv__ = joinpath + def joinpath(self, *descendants): + try: + return super().joinpath(*descendants) + except abc.TraversalError: + # One of the paths did not resolve (a directory does not exist). + # Just return something that will not exist. + return self._paths[0].joinpath(*descendants) def open(self, *args, **kwargs): raise FileNotFoundError(f'{self} is not a file') diff --git a/lib/pkg_resources/_vendor/importlib_resources/simple.py b/lib/pkg_resources/_vendor/importlib_resources/simple.py index da073cbd..7770c922 100644 --- a/lib/pkg_resources/_vendor/importlib_resources/simple.py +++ b/lib/pkg_resources/_vendor/importlib_resources/simple.py @@ -16,31 +16,28 @@ class SimpleReader(abc.ABC): provider. """ - @abc.abstractproperty - def package(self): - # type: () -> str + @property + @abc.abstractmethod + def package(self) -> str: """ The name of the package for which this reader loads resources. """ @abc.abstractmethod - def children(self): - # type: () -> List['SimpleReader'] + def children(self) -> List['SimpleReader']: """ Obtain an iterable of SimpleReader for available child containers (e.g. directories). """ @abc.abstractmethod - def resources(self): - # type: () -> List[str] + def resources(self) -> List[str]: """ Obtain available named resources for this virtual package. """ @abc.abstractmethod - def open_binary(self, resource): - # type: (str) -> BinaryIO + def open_binary(self, resource: str) -> BinaryIO: """ Obtain a File-like for a named resource. """ @@ -50,13 +47,35 @@ class SimpleReader(abc.ABC): return self.package.split('.')[-1] +class ResourceContainer(Traversable): + """ + Traversable container for a package's resources via its reader. + """ + + def __init__(self, reader: SimpleReader): + self.reader = reader + + def is_dir(self): + return True + + def is_file(self): + return False + + def iterdir(self): + files = (ResourceHandle(self, name) for name in self.reader.resources) + dirs = map(ResourceContainer, self.reader.children()) + return itertools.chain(files, dirs) + + def open(self, *args, **kwargs): + raise IsADirectoryError() + + class ResourceHandle(Traversable): """ Handle to a named resource in a ResourceReader. 
""" - def __init__(self, parent, name): - # type: (ResourceContainer, str) -> None + def __init__(self, parent: ResourceContainer, name: str): self.parent = parent self.name = name # type: ignore @@ -76,35 +95,6 @@ class ResourceHandle(Traversable): raise RuntimeError("Cannot traverse into a resource") -class ResourceContainer(Traversable): - """ - Traversable container for a package's resources via its reader. - """ - - def __init__(self, reader): - # type: (SimpleReader) -> None - self.reader = reader - - def is_dir(self): - return True - - def is_file(self): - return False - - def iterdir(self): - files = (ResourceHandle(self, name) for name in self.reader.resources) - dirs = map(ResourceContainer, self.reader.children()) - return itertools.chain(files, dirs) - - def open(self, *args, **kwargs): - raise IsADirectoryError() - - def joinpath(self, name): - return next( - traversable for traversable in self.iterdir() if traversable.name == name - ) - - class TraversableReader(TraversableResources, SimpleReader): """ A TraversableResources based on SimpleReader. Resource providers diff --git a/lib/pkg_resources/_vendor/vendored.txt b/lib/pkg_resources/_vendor/vendored.txt index da7d08d2..6fafd437 100644 --- a/lib/pkg_resources/_vendor/vendored.txt +++ b/lib/pkg_resources/_vendor/vendored.txt @@ -6,6 +6,6 @@ typing_extensions==4.4.0 jaraco.text==3.7.0 # required for jaraco.text on older Pythons -importlib_resources==5.4.0 +importlib_resources==5.10.2 # required for importlib_resources on older Pythons zipp==3.7.0 From 9d8462f4efba7160ff5f9c0deb54d77b602554f1 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Tue, 7 Mar 2023 15:15:39 +0000 Subject: [PATCH 12/21] =?UTF-8?q?Update=20Tornado=20Web=20Server=206.2.0?= =?UTF-8?q?=20(a4f08a3)=20=E2=86=92=206.3.dev1=20(7186b86).?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGES.md | 1 + lib/tornado/__init__.py | 45 ++++++- lib/tornado/_locale_data.py | 124 ++++++++--------- lib/tornado/auth.py | 56 +++++--- lib/tornado/curl_httpclient.py | 4 +- lib/tornado/escape.py | 2 +- lib/tornado/gen.py | 26 ++-- lib/tornado/ioloop.py | 52 ++++---- lib/tornado/iostream.py | 44 ++---- lib/tornado/locale.py | 6 +- lib/tornado/netutil.py | 6 +- lib/tornado/options.py | 6 +- lib/tornado/platform/asyncio.py | 53 +++----- lib/tornado/platform/caresresolver.py | 13 +- lib/tornado/queues.py | 4 +- lib/tornado/simple_httpclient.py | 2 +- lib/tornado/tcpclient.py | 6 +- lib/tornado/tcpserver.py | 8 +- lib/tornado/testing.py | 133 ++++++++----------- lib/tornado/web.py | 184 +++++++++++++++++--------- lib/tornado/websocket.py | 13 +- lib/tornado/wsgi.py | 108 +++++++++++---- sickgear/webserve.py | 4 +- 23 files changed, 516 insertions(+), 384 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 99d74bb7..df558de5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,6 +2,7 @@ * Update html5lib 1.1 (f87487a) to 1.2-dev (3e500bb) * Update package resource API 63.2.0 (3ae44cd) to 67.5.1 (f51eccd) +* Update Tornado Web Server 6.2.0 (a4f08a3) to 6.3.0 (7186b86) * Update urllib3 1.26.13 (25fbd5f) to 1.26.14 (a06c05c) * Change remove calls to legacy py2 fix encoding function * Change requirements for pure py3 diff --git a/lib/tornado/__init__.py b/lib/tornado/__init__.py index 39d7c44b..060b836a 100644 --- a/lib/tornado/__init__.py +++ b/lib/tornado/__init__.py @@ -22,5 +22,46 @@ # is zero for an official release, positive for a development branch, # or negative for a release candidate or beta (after the base version # number has been 
incremented) -version = "6.2" -version_info = (6, 2, 0, 0) +version = "6.3.dev1" +version_info = (6, 3, 0, -100) + +import importlib +import typing + +__all__ = [ + "auth", + "autoreload", + "concurrent", + "curl_httpclient", + "escape", + "gen", + "http1connection", + "httpclient", + "httpserver", + "httputil", + "ioloop", + "iostream", + "locale", + "locks", + "log", + "netutil", + "options", + "platform", + "process", + "queues", + "routing", + "simple_httpclient", + "tcpclient", + "tcpserver", + "template", + "testing", + "util", + "web", +] + + +# Copied from https://peps.python.org/pep-0562/ +def __getattr__(name: str) -> typing.Any: + if name in __all__: + return importlib.import_module("." + name, __name__) + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/lib/tornado/_locale_data.py b/lib/tornado/_locale_data.py index c706230e..7a5d2852 100644 --- a/lib/tornado/_locale_data.py +++ b/lib/tornado/_locale_data.py @@ -15,66 +15,66 @@ """Data used by the tornado.locale module.""" LOCALE_NAMES = { - "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"}, - "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"}, - "ar_AR": {"name_en": u"Arabic", "name": u"العربية"}, - "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"}, - "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"}, - "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"}, - "ca_ES": {"name_en": u"Catalan", "name": u"Català"}, - "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"}, - "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"}, - "da_DK": {"name_en": u"Danish", "name": u"Dansk"}, - "de_DE": {"name_en": u"German", "name": u"Deutsch"}, - "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"}, - "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"}, - "en_US": {"name_en": u"English (US)", "name": u"English (US)"}, - "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"}, - "es_LA": {"name_en": u"Spanish", "name": u"Español"}, - "et_EE": {"name_en": u"Estonian", "name": u"Eesti"}, - "eu_ES": {"name_en": u"Basque", "name": u"Euskara"}, - "fa_IR": {"name_en": u"Persian", "name": u"فارسی"}, - "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"}, - "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"}, - "fr_FR": {"name_en": u"French", "name": u"Français"}, - "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"}, - "gl_ES": {"name_en": u"Galician", "name": u"Galego"}, - "he_IL": {"name_en": u"Hebrew", "name": u"עברית"}, - "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"}, - "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"}, - "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"}, - "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"}, - "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"}, - "it_IT": {"name_en": u"Italian", "name": u"Italiano"}, - "ja_JP": {"name_en": u"Japanese", "name": u"日本語"}, - "ko_KR": {"name_en": u"Korean", "name": u"한국어"}, - "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"}, - "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"}, - "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"}, - "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"}, - "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"}, - "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"}, - "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"}, - "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"}, - "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"}, - "pl_PL": {"name_en": u"Polish", "name": 
u"Polski"}, - "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"}, - "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"}, - "ro_RO": {"name_en": u"Romanian", "name": u"Română"}, - "ru_RU": {"name_en": u"Russian", "name": u"Русский"}, - "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"}, - "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"}, - "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"}, - "sr_RS": {"name_en": u"Serbian", "name": u"Српски"}, - "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"}, - "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"}, - "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"}, - "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"}, - "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"}, - "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"}, - "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"}, - "uk_UA": {"name_en": u"Ukraini ", "name": u"Українська"}, - "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"}, - "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"}, - "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"}, + "af_ZA": {"name_en": "Afrikaans", "name": "Afrikaans"}, + "am_ET": {"name_en": "Amharic", "name": "አማርኛ"}, + "ar_AR": {"name_en": "Arabic", "name": "العربية"}, + "bg_BG": {"name_en": "Bulgarian", "name": "Български"}, + "bn_IN": {"name_en": "Bengali", "name": "বাংলা"}, + "bs_BA": {"name_en": "Bosnian", "name": "Bosanski"}, + "ca_ES": {"name_en": "Catalan", "name": "Català"}, + "cs_CZ": {"name_en": "Czech", "name": "Čeština"}, + "cy_GB": {"name_en": "Welsh", "name": "Cymraeg"}, + "da_DK": {"name_en": "Danish", "name": "Dansk"}, + "de_DE": {"name_en": "German", "name": "Deutsch"}, + "el_GR": {"name_en": "Greek", "name": "Ελληνικά"}, + "en_GB": {"name_en": "English (UK)", "name": "English (UK)"}, + "en_US": {"name_en": "English (US)", "name": "English (US)"}, + "es_ES": {"name_en": "Spanish (Spain)", "name": "Español (España)"}, + "es_LA": {"name_en": "Spanish", "name": "Español"}, + "et_EE": {"name_en": "Estonian", "name": "Eesti"}, + "eu_ES": {"name_en": "Basque", "name": "Euskara"}, + "fa_IR": {"name_en": "Persian", "name": "فارسی"}, + "fi_FI": {"name_en": "Finnish", "name": "Suomi"}, + "fr_CA": {"name_en": "French (Canada)", "name": "Français (Canada)"}, + "fr_FR": {"name_en": "French", "name": "Français"}, + "ga_IE": {"name_en": "Irish", "name": "Gaeilge"}, + "gl_ES": {"name_en": "Galician", "name": "Galego"}, + "he_IL": {"name_en": "Hebrew", "name": "עברית"}, + "hi_IN": {"name_en": "Hindi", "name": "हिन्दी"}, + "hr_HR": {"name_en": "Croatian", "name": "Hrvatski"}, + "hu_HU": {"name_en": "Hungarian", "name": "Magyar"}, + "id_ID": {"name_en": "Indonesian", "name": "Bahasa Indonesia"}, + "is_IS": {"name_en": "Icelandic", "name": "Íslenska"}, + "it_IT": {"name_en": "Italian", "name": "Italiano"}, + "ja_JP": {"name_en": "Japanese", "name": "日本語"}, + "ko_KR": {"name_en": "Korean", "name": "한국어"}, + "lt_LT": {"name_en": "Lithuanian", "name": "Lietuvių"}, + "lv_LV": {"name_en": "Latvian", "name": "Latviešu"}, + "mk_MK": {"name_en": "Macedonian", "name": "Македонски"}, + "ml_IN": {"name_en": "Malayalam", "name": "മലയാളം"}, + "ms_MY": {"name_en": "Malay", "name": "Bahasa Melayu"}, + "nb_NO": {"name_en": "Norwegian (bokmal)", "name": "Norsk (bokmål)"}, + "nl_NL": {"name_en": "Dutch", "name": "Nederlands"}, + "nn_NO": {"name_en": "Norwegian (nynorsk)", "name": "Norsk (nynorsk)"}, + "pa_IN": {"name_en": "Punjabi", "name": "ਪੰਜਾਬੀ"}, + "pl_PL": {"name_en": 
"Polish", "name": "Polski"}, + "pt_BR": {"name_en": "Portuguese (Brazil)", "name": "Português (Brasil)"}, + "pt_PT": {"name_en": "Portuguese (Portugal)", "name": "Português (Portugal)"}, + "ro_RO": {"name_en": "Romanian", "name": "Română"}, + "ru_RU": {"name_en": "Russian", "name": "Русский"}, + "sk_SK": {"name_en": "Slovak", "name": "Slovenčina"}, + "sl_SI": {"name_en": "Slovenian", "name": "Slovenščina"}, + "sq_AL": {"name_en": "Albanian", "name": "Shqip"}, + "sr_RS": {"name_en": "Serbian", "name": "Српски"}, + "sv_SE": {"name_en": "Swedish", "name": "Svenska"}, + "sw_KE": {"name_en": "Swahili", "name": "Kiswahili"}, + "ta_IN": {"name_en": "Tamil", "name": "தமிழ்"}, + "te_IN": {"name_en": "Telugu", "name": "తెలుగు"}, + "th_TH": {"name_en": "Thai", "name": "ภาษาไทย"}, + "tl_PH": {"name_en": "Filipino", "name": "Filipino"}, + "tr_TR": {"name_en": "Turkish", "name": "Türkçe"}, + "uk_UA": {"name_en": "Ukraini ", "name": "Українська"}, + "vi_VN": {"name_en": "Vietnamese", "name": "Tiếng Việt"}, + "zh_CN": {"name_en": "Chinese (Simplified)", "name": "中文(简体)"}, + "zh_TW": {"name_en": "Chinese (Traditional)", "name": "中文(繁體)"}, } diff --git a/lib/tornado/auth.py b/lib/tornado/auth.py index d1cf29b3..59501f56 100644 --- a/lib/tornado/auth.py +++ b/lib/tornado/auth.py @@ -42,7 +42,7 @@ Example usage for Google OAuth: user = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) - # Save the user with e.g. set_secure_cookie + # Save the user with e.g. set_signed_cookie else: self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', @@ -136,7 +136,7 @@ class OpenIdMixin(object): args = dict( (k, v[-1]) for k, v in handler.request.arguments.items() ) # type: Dict[str, Union[str, bytes]] - args["openid.mode"] = u"check_authentication" + args["openid.mode"] = "check_authentication" url = self._OPENID_ENDPOINT # type: ignore if http_client is None: http_client = self.get_auth_http_client() @@ -211,14 +211,14 @@ class OpenIdMixin(object): for key in handler.request.arguments: if ( key.startswith("openid.ns.") - and handler.get_argument(key) == u"http://openid.net/srv/ax/1.0" + and handler.get_argument(key) == "http://openid.net/srv/ax/1.0" ): ax_ns = key[10:] break def get_ax_arg(uri: str) -> str: if not ax_ns: - return u"" + return "" prefix = "openid." + ax_ns + ".type." ax_name = None for name in handler.request.arguments.keys(): @@ -227,8 +227,8 @@ class OpenIdMixin(object): ax_name = "openid." + ax_ns + ".value." + part break if not ax_name: - return u"" - return handler.get_argument(ax_name, u"") + return "" + return handler.get_argument(ax_name, "") email = get_ax_arg("http://axschema.org/contact/email") name = get_ax_arg("http://axschema.org/namePerson") @@ -247,7 +247,7 @@ class OpenIdMixin(object): if name: user["name"] = name elif name_parts: - user["name"] = u" ".join(name_parts) + user["name"] = " ".join(name_parts) elif email: user["name"] = email.split("@")[0] if email: @@ -694,7 +694,7 @@ class TwitterMixin(OAuthMixin): async def get(self): if self.get_argument("oauth_token", None): user = await self.get_authenticated_user() - # Save the user using e.g. set_secure_cookie() + # Save the user using e.g. 
set_signed_cookie() else: await self.authorize_redirect() @@ -855,8 +855,28 @@ class GoogleOAuth2Mixin(OAuth2Mixin): _OAUTH_NO_CALLBACKS = False _OAUTH_SETTINGS_KEY = "google_oauth" + def get_google_oauth_settings(self) -> Dict[str, str]: + """Return the Google OAuth 2.0 credentials that you created with + [Google Cloud + Platform](https://console.cloud.google.com/apis/credentials). The dict + format is:: + + { + "key": "your_client_id", "secret": "your_client_secret" + } + + If your credentials are stored differently (e.g. in a db) you can + override this method for custom provision. + """ + handler = cast(RequestHandler, self) + return handler.settings[self._OAUTH_SETTINGS_KEY] + async def get_authenticated_user( - self, redirect_uri: str, code: str + self, + redirect_uri: str, + code: str, + client_id: Optional[str] = None, + client_secret: Optional[str] = None, ) -> Dict[str, Any]: """Handles the login for the Google user, returning an access token. @@ -883,11 +903,11 @@ class GoogleOAuth2Mixin(OAuth2Mixin): "https://www.googleapis.com/oauth2/v1/userinfo", access_token=access["access_token"]) # Save the user and access token with - # e.g. set_secure_cookie. + # e.g. set_signed_cookie. else: self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', - client_id=self.settings['google_oauth']['key'], + client_id=self.get_google_oauth_settings()['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) @@ -899,14 +919,20 @@ class GoogleOAuth2Mixin(OAuth2Mixin): The ``callback`` argument was removed. Use the returned awaitable object instead. """ # noqa: E501 - handler = cast(RequestHandler, self) + + if client_id is None or client_secret is None: + settings = self.get_google_oauth_settings() + if client_id is None: + client_id = settings["key"] + if client_secret is None: + client_secret = settings["secret"] http = self.get_auth_http_client() body = urllib.parse.urlencode( { "redirect_uri": redirect_uri, "code": code, - "client_id": handler.settings[self._OAUTH_SETTINGS_KEY]["key"], - "client_secret": handler.settings[self._OAUTH_SETTINGS_KEY]["secret"], + "client_id": client_id, + "client_secret": client_secret, "grant_type": "authorization_code", } ) @@ -951,7 +977,7 @@ class FacebookGraphMixin(OAuth2Mixin): client_id=self.settings["facebook_api_key"], client_secret=self.settings["facebook_secret"], code=self.get_argument("code")) - # Save the user with e.g. set_secure_cookie + # Save the user with e.g. set_signed_cookie else: self.authorize_redirect( redirect_uri='/auth/facebookgraph/', diff --git a/lib/tornado/curl_httpclient.py b/lib/tornado/curl_httpclient.py index 61b6b7a9..23320e48 100644 --- a/lib/tornado/curl_httpclient.py +++ b/lib/tornado/curl_httpclient.py @@ -36,11 +36,11 @@ from tornado.httpclient import ( ) from tornado.log import app_log -from typing import Dict, Any, Callable, Union, Tuple, Optional +from typing import Dict, Any, Callable, Union, Optional import typing if typing.TYPE_CHECKING: - from typing import Deque # noqa: F401 + from typing import Deque, Tuple # noqa: F401 curl_log = logging.getLogger("tornado.curl_httpclient") diff --git a/lib/tornado/escape.py b/lib/tornado/escape.py index 3cf7ff2e..55354c30 100644 --- a/lib/tornado/escape.py +++ b/lib/tornado/escape.py @@ -368,7 +368,7 @@ def linkify( # have a status bar, such as Safari by default) params += ' title="%s"' % href - return u'%s' % (href, params, url) + return '%s' % (href, params, url) # First HTML-escape so that our strings are all safe. 
# The regex is modified to avoid character entites other than & so diff --git a/lib/tornado/gen.py b/lib/tornado/gen.py index 1946ab91..4819b857 100644 --- a/lib/tornado/gen.py +++ b/lib/tornado/gen.py @@ -743,7 +743,7 @@ class Runner(object): self.running = False self.finished = False self.io_loop = IOLoop.current() - if self.handle_yield(first_yielded): + if self.ctx_run(self.handle_yield, first_yielded): gen = result_future = first_yielded = None # type: ignore self.ctx_run(self.run) @@ -763,21 +763,25 @@ class Runner(object): return self.future = None try: - exc_info = None - try: value = future.result() - except Exception: - exc_info = sys.exc_info() - future = None + except Exception as e: + # Save the exception for later. It's important that + # gen.throw() not be called inside this try/except block + # because that makes sys.exc_info behave unexpectedly. + exc: Optional[Exception] = e + else: + exc = None + finally: + future = None - if exc_info is not None: + if exc is not None: try: - yielded = self.gen.throw(*exc_info) # type: ignore + yielded = self.gen.throw(exc) finally: - # Break up a reference to itself - # for faster GC on CPython. - exc_info = None + # Break up a circular reference for faster GC on + # CPython. + del exc else: yielded = self.gen.send(value) diff --git a/lib/tornado/ioloop.py b/lib/tornado/ioloop.py index 2c05755d..bcdcca09 100644 --- a/lib/tornado/ioloop.py +++ b/lib/tornado/ioloop.py @@ -83,7 +83,7 @@ class IOLoop(Configurable): import functools import socket - import tornado.ioloop + import tornado from tornado.iostream import IOStream async def handle_connection(connection, address): @@ -123,8 +123,7 @@ class IOLoop(Configurable): and instead initialize the `asyncio` event loop and use `IOLoop.current()`. In some cases, such as in test frameworks when initializing an `IOLoop` to be run in a secondary thread, it may be appropriate to construct - an `IOLoop` with ``IOLoop(make_current=False)``. Constructing an `IOLoop` - without the ``make_current=False`` argument is deprecated since Tornado 6.2. + an `IOLoop` with ``IOLoop(make_current=False)``. In general, an `IOLoop` cannot survive a fork or be shared across processes in any way. When multiple processes are being used, each process should @@ -145,12 +144,10 @@ class IOLoop(Configurable): cannot be used on Python 3 except to redundantly specify the `asyncio` event loop. - .. deprecated:: 6.2 - It is deprecated to create an event loop that is "current" but not - running. This means it is deprecated to pass - ``make_current=True`` to the ``IOLoop`` constructor, or to create - an ``IOLoop`` while no asyncio event loop is running unless - ``make_current=False`` is used. + .. versionchanged:: 6.3 + ``make_current=True`` is now the default when creating an IOLoop - + previously the default was to make the event loop current if there wasn't + already a current one. """ # These constants were originally based on constants from the epoll module. @@ -263,17 +260,20 @@ class IOLoop(Configurable): """ try: loop = asyncio.get_event_loop() - except (RuntimeError, AssertionError): + except RuntimeError: if not instance: return None - raise + # Create a new asyncio event loop for this thread. 
+ loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: return IOLoop._ioloop_for_asyncio[loop] except KeyError: if instance: from tornado.platform.asyncio import AsyncIOMainLoop - current = AsyncIOMainLoop(make_current=True) # type: Optional[IOLoop] + current = AsyncIOMainLoop() # type: Optional[IOLoop] else: current = None return current @@ -295,12 +295,17 @@ class IOLoop(Configurable): This method also sets the current `asyncio` event loop. .. deprecated:: 6.2 - The concept of an event loop that is "current" without - currently running is deprecated in asyncio since Python - 3.10. All related functionality in Tornado is also - deprecated. Instead, start the event loop with `asyncio.run` - before interacting with it. + Setting and clearing the current event loop through Tornado is + deprecated. Use ``asyncio.set_event_loop`` instead if you need this. """ + warnings.warn( + "make_current is deprecated; start the event loop first", + DeprecationWarning, + stacklevel=2, + ) + self._make_current() + + def _make_current(self) -> None: # The asyncio event loops override this method. raise NotImplementedError() @@ -344,16 +349,9 @@ class IOLoop(Configurable): return AsyncIOLoop - def initialize(self, make_current: Optional[bool] = None) -> None: - if make_current is None: - if IOLoop.current(instance=False) is None: - self.make_current() - elif make_current: - current = IOLoop.current(instance=False) - # AsyncIO loops can already be current by this point. - if current is not None and current is not self: - raise RuntimeError("current IOLoop already exists") - self.make_current() + def initialize(self, make_current: bool = True) -> None: + if make_current: + self._make_current() def close(self, all_fds: bool = False) -> None: """Closes the `IOLoop`, freeing any resources used. diff --git a/lib/tornado/iostream.py b/lib/tornado/iostream.py index 7f19a7fa..e7291263 100644 --- a/lib/tornado/iostream.py +++ b/lib/tornado/iostream.py @@ -195,11 +195,9 @@ class _StreamBuffer(object): pos += size size = 0 else: - # Amortized O(1) shrink for Python 2 pos += size - if len(b) <= 2 * pos: - del typing.cast(bytearray, b)[:pos] - pos = 0 + del typing.cast(bytearray, b)[:pos] + pos = 0 size = 0 assert size == 0 @@ -254,7 +252,6 @@ class BaseIOStream(object): self.max_write_buffer_size = max_write_buffer_size self.error = None # type: Optional[BaseException] self._read_buffer = bytearray() - self._read_buffer_pos = 0 self._read_buffer_size = 0 self._user_read_buffer = False self._after_user_read_buffer = None # type: Optional[bytearray] @@ -451,21 +448,17 @@ class BaseIOStream(object): available_bytes = self._read_buffer_size n = len(buf) if available_bytes >= n: - end = self._read_buffer_pos + n - buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos : end] - del self._read_buffer[:end] + buf[:] = memoryview(self._read_buffer)[:n] + del self._read_buffer[:n] self._after_user_read_buffer = self._read_buffer elif available_bytes > 0: - buf[:available_bytes] = memoryview(self._read_buffer)[ - self._read_buffer_pos : - ] + buf[:available_bytes] = memoryview(self._read_buffer)[:] # Set up the supplied buffer as our temporary read buffer. # The original (if it had any data remaining) has been # saved for later. 
self._user_read_buffer = True self._read_buffer = buf - self._read_buffer_pos = 0 self._read_buffer_size = available_bytes self._read_bytes = n self._read_partial = partial @@ -818,7 +811,6 @@ class BaseIOStream(object): if self._user_read_buffer: self._read_buffer = self._after_user_read_buffer or bytearray() self._after_user_read_buffer = None - self._read_buffer_pos = 0 self._read_buffer_size = len(self._read_buffer) self._user_read_buffer = False result = size # type: Union[int, bytes] @@ -931,20 +923,17 @@ class BaseIOStream(object): # since large merges are relatively expensive and get undone in # _consume(). if self._read_buffer: - loc = self._read_buffer.find( - self._read_delimiter, self._read_buffer_pos - ) + loc = self._read_buffer.find(self._read_delimiter) if loc != -1: - loc -= self._read_buffer_pos delimiter_len = len(self._read_delimiter) self._check_max_bytes(self._read_delimiter, loc + delimiter_len) return loc + delimiter_len self._check_max_bytes(self._read_delimiter, self._read_buffer_size) elif self._read_regex is not None: if self._read_buffer: - m = self._read_regex.search(self._read_buffer, self._read_buffer_pos) + m = self._read_regex.search(self._read_buffer) if m is not None: - loc = m.end() - self._read_buffer_pos + loc = m.end() self._check_max_bytes(self._read_regex, loc) return loc self._check_max_bytes(self._read_regex, self._read_buffer_size) @@ -1001,19 +990,9 @@ class BaseIOStream(object): return b"" assert loc <= self._read_buffer_size # Slice the bytearray buffer into bytes, without intermediate copying - b = ( - memoryview(self._read_buffer)[ - self._read_buffer_pos : self._read_buffer_pos + loc - ] - ).tobytes() - self._read_buffer_pos += loc + b = (memoryview(self._read_buffer)[:loc]).tobytes() self._read_buffer_size -= loc - # Amortized O(1) shrink - # (this heuristic is implemented natively in Python 3.4+ - # but is replicated here for Python 2) - if self._read_buffer_pos > self._read_buffer_size: - del self._read_buffer[: self._read_buffer_pos] - self._read_buffer_pos = 0 + del self._read_buffer[:loc] return b def _check_closed(self) -> None: @@ -1092,9 +1071,8 @@ class IOStream(BaseIOStream): .. 
testcode:: - import tornado.ioloop - import tornado.iostream import socket + import tornado async def main(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) diff --git a/lib/tornado/locale.py b/lib/tornado/locale.py index 533ce4d4..55072af2 100644 --- a/lib/tornado/locale.py +++ b/lib/tornado/locale.py @@ -268,7 +268,7 @@ class Locale(object): def __init__(self, code: str) -> None: self.code = code - self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown") + self.name = LOCALE_NAMES.get(code, {}).get("name", "Unknown") self.rtl = False for prefix in ["fa", "ar", "he"]: if self.code.startswith(prefix): @@ -406,7 +406,7 @@ class Locale(object): str_time = "%d:%02d" % (local_date.hour, local_date.minute) elif self.code == "zh_CN": str_time = "%s%d:%02d" % ( - (u"\u4e0a\u5348", u"\u4e0b\u5348")[local_date.hour >= 12], + ("\u4e0a\u5348", "\u4e0b\u5348")[local_date.hour >= 12], local_date.hour % 12 or 12, local_date.minute, ) @@ -458,7 +458,7 @@ class Locale(object): return "" if len(parts) == 1: return parts[0] - comma = u" \u0648 " if self.code.startswith("fa") else u", " + comma = " \u0648 " if self.code.startswith("fa") else ", " return _("%(commas)s and %(last)s") % { "commas": comma.join(parts[:-1]), "last": parts[len(parts) - 1], diff --git a/lib/tornado/netutil.py b/lib/tornado/netutil.py index 069e9a6b..04db085a 100644 --- a/lib/tornado/netutil.py +++ b/lib/tornado/netutil.py @@ -44,10 +44,10 @@ if hasattr(ssl, "OP_NO_COMPRESSION"): # module-import time, the import lock is already held by the main thread, # leading to deadlock. Avoid it by caching the idna encoder on the main # thread now. -u"foo".encode("idna") +"foo".encode("idna") # For undiagnosed reasons, 'latin1' codec may also need to be preloaded. -u"foo".encode("latin1") +"foo".encode("latin1") # Default backlog used when calling sock.listen() _DEFAULT_BACKLOG = 128 @@ -115,7 +115,7 @@ def bind_sockets( sys.platform == "darwin" and address == "localhost" and af == socket.AF_INET6 - and sockaddr[3] != 0 + and sockaddr[3] != 0 # type: ignore ): # Mac OS X includes a link-local address fe80::1%lo0 in the # getaddrinfo results for 'localhost'. 
However, the firewall diff --git a/lib/tornado/options.py b/lib/tornado/options.py index e62f7efe..b8296691 100644 --- a/lib/tornado/options.py +++ b/lib/tornado/options.py @@ -56,7 +56,7 @@ Your ``main()`` method can parse the command line or parse a config file with either `parse_command_line` or `parse_config_file`:: import myapp.db, myapp.server - import tornado.options + import tornado if __name__ == '__main__': tornado.options.parse_command_line() @@ -427,7 +427,9 @@ class OptionParser(object): % (option.name, option.type.__name__) ) - if type(config[name]) == str and option.type != str: + if type(config[name]) == str and ( + option.type != str or option.multiple + ): option.parse(config[name]) else: option.set(config[name]) diff --git a/lib/tornado/platform/asyncio.py b/lib/tornado/platform/asyncio.py index ca671ac6..a15a74df 100644 --- a/lib/tornado/platform/asyncio.py +++ b/lib/tornado/platform/asyncio.py @@ -36,10 +36,10 @@ import warnings from tornado.gen import convert_yielded from tornado.ioloop import IOLoop, _Selectable -from typing import Any, TypeVar, Awaitable, Callable, Union, Optional, List, Tuple, Dict +from typing import Any, TypeVar, Awaitable, Callable, Union, Optional, List, Dict if typing.TYPE_CHECKING: - from typing import Set # noqa: F401 + from typing import Set, Tuple # noqa: F401 from typing_extensions import Protocol class _HasFileno(Protocol): @@ -74,20 +74,6 @@ def _atexit_callback() -> None: atexit.register(_atexit_callback) -if sys.version_info >= (3, 10): - - def _get_event_loop() -> asyncio.AbstractEventLoop: - try: - return asyncio.get_running_loop() - except RuntimeError: - pass - - return asyncio.get_event_loop_policy().get_event_loop() - - -else: - from asyncio import get_event_loop as _get_event_loop - class BaseAsyncIOLoop(IOLoop): def initialize( # type: ignore @@ -206,15 +192,7 @@ class BaseAsyncIOLoop(IOLoop): handler_func(fileobj, events) def start(self) -> None: - try: - old_loop = _get_event_loop() - except (RuntimeError, AssertionError): - old_loop = None # type: ignore - try: - asyncio.set_event_loop(self.asyncio_loop) - self.asyncio_loop.run_forever() - finally: - asyncio.set_event_loop(old_loop) + self.asyncio_loop.run_forever() def stop(self) -> None: self.asyncio_loop.stop() @@ -298,7 +276,7 @@ class AsyncIOMainLoop(BaseAsyncIOLoop): def initialize(self, **kwargs: Any) -> None: # type: ignore super().initialize(asyncio.get_event_loop(), **kwargs) - def make_current(self) -> None: + def _make_current(self) -> None: # AsyncIOMainLoop already refers to the current asyncio loop so # nothing to do here. 
pass @@ -349,12 +327,7 @@ class AsyncIOLoop(BaseAsyncIOLoop): self._clear_current() super().close(all_fds=all_fds) - def make_current(self) -> None: - warnings.warn( - "make_current is deprecated; start the event loop first", - DeprecationWarning, - stacklevel=2, - ) + def _make_current(self) -> None: if not self.is_current: try: self.old_asyncio = asyncio.get_event_loop() @@ -672,10 +645,18 @@ class AddThreadSelectorEventLoop(asyncio.AbstractEventLoop): self._writers[fd] = functools.partial(callback, *args) self._wake_selector() - def remove_reader(self, fd: "_FileDescriptorLike") -> None: - del self._readers[fd] + def remove_reader(self, fd: "_FileDescriptorLike") -> bool: + try: + del self._readers[fd] + except KeyError: + return False self._wake_selector() + return True - def remove_writer(self, fd: "_FileDescriptorLike") -> None: - del self._writers[fd] + def remove_writer(self, fd: "_FileDescriptorLike") -> bool: + try: + del self._writers[fd] + except KeyError: + return False self._wake_selector() + return True diff --git a/lib/tornado/platform/caresresolver.py b/lib/tornado/platform/caresresolver.py index 962f84f4..1ba45c9a 100644 --- a/lib/tornado/platform/caresresolver.py +++ b/lib/tornado/platform/caresresolver.py @@ -15,14 +15,15 @@ if typing.TYPE_CHECKING: class CaresResolver(Resolver): """Name resolver based on the c-ares library. - This is a non-blocking and non-threaded resolver. It may not produce - the same results as the system resolver, but can be used for non-blocking + This is a non-blocking and non-threaded resolver. It may not produce the + same results as the system resolver, but can be used for non-blocking resolution when threads cannot be used. - c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``, - so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is - the default for ``tornado.simple_httpclient``, but other libraries - may default to ``AF_UNSPEC``. + ``pycares`` will not return a mix of ``AF_INET`` and ``AF_INET6`` when + ``family`` is ``AF_UNSPEC``, so it is only recommended for use in + ``AF_INET`` (i.e. IPv4). This is the default for + ``tornado.simple_httpclient``, but other libraries may default to + ``AF_UNSPEC``. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. 
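The revised ``CaresResolver`` docstring above recommends restricting it to ``AF_INET`` lookups. A minimal usage sketch consistent with that advice (not taken from this patch), assuming the optional ``pycares`` dependency is installed::

    import asyncio
    import socket

    from tornado.netutil import Resolver

    # Select the c-ares backed resolver implementation; requires pycares.
    Resolver.configure("tornado.platform.caresresolver.CaresResolver")

    async def main():
        resolver = Resolver()
        # Request IPv4 results only, per the AF_UNSPEC caveat in the docstring.
        addrinfo = await resolver.resolve("example.com", 80, socket.AF_INET)
        print(addrinfo)

    asyncio.run(main())
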
diff --git a/lib/tornado/queues.py b/lib/tornado/queues.py index 32132e16..1358d0ec 100644 --- a/lib/tornado/queues.py +++ b/lib/tornado/queues.py @@ -381,7 +381,7 @@ class PriorityQueue(Queue): def _put(self, item: _T) -> None: heapq.heappush(self._queue, item) - def _get(self) -> _T: + def _get(self) -> _T: # type: ignore[type-var] return heapq.heappop(self._queue) @@ -418,5 +418,5 @@ class LifoQueue(Queue): def _put(self, item: _T) -> None: self._queue.append(item) - def _get(self) -> _T: + def _get(self) -> _T: # type: ignore[type-var] return self._queue.pop() diff --git a/lib/tornado/simple_httpclient.py b/lib/tornado/simple_httpclient.py index 3a1aa53d..2460863f 100644 --- a/lib/tornado/simple_httpclient.py +++ b/lib/tornado/simple_httpclient.py @@ -547,7 +547,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): value: Optional[BaseException], tb: Optional[TracebackType], ) -> bool: - if self.final_callback: + if self.final_callback is not None: self._remove_timeout() if isinstance(value, StreamClosedError): if value.real_error is None: diff --git a/lib/tornado/tcpclient.py b/lib/tornado/tcpclient.py index e2d682ea..0a829062 100644 --- a/lib/tornado/tcpclient.py +++ b/lib/tornado/tcpclient.py @@ -21,6 +21,7 @@ import socket import numbers import datetime import ssl +import typing from tornado.concurrent import Future, future_add_done_callback from tornado.ioloop import IOLoop @@ -29,7 +30,10 @@ from tornado import gen from tornado.netutil import Resolver from tornado.gen import TimeoutError -from typing import Any, Union, Dict, Tuple, List, Callable, Iterator, Optional, Set +from typing import Any, Union, Dict, Tuple, List, Callable, Iterator, Optional + +if typing.TYPE_CHECKING: + from typing import Set # noqa(F401) _INITIAL_CONNECT_TIMEOUT = 0.3 diff --git a/lib/tornado/tcpserver.py b/lib/tornado/tcpserver.py index 183aac21..deab8f2a 100644 --- a/lib/tornado/tcpserver.py +++ b/lib/tornado/tcpserver.py @@ -246,9 +246,7 @@ class TCPServer(object): .. deprecated:: 6.2 Use either ``listen()`` or ``add_sockets()`` instead of ``bind()`` - and ``start()``. The ``bind()/start()`` pattern depends on - interfaces that have been deprecated in Python 3.10 and will be - removed in future versions of Python. + and ``start()``. """ sockets = bind_sockets( port, @@ -295,9 +293,7 @@ class TCPServer(object): .. deprecated:: 6.2 Use either ``listen()`` or ``add_sockets()`` instead of ``bind()`` - and ``start()``. The ``bind()/start()`` pattern depends on - interfaces that have been deprecated in Python 3.10 and will be - removed in future versions of Python. + and ``start()``. """ assert not self._started self._started = True diff --git a/lib/tornado/testing.py b/lib/tornado/testing.py index 688464f0..9bfadf45 100644 --- a/lib/tornado/testing.py +++ b/lib/tornado/testing.py @@ -135,7 +135,8 @@ class AsyncTestCase(unittest.TestCase): By default, a new `.IOLoop` is constructed for each test and is available as ``self.io_loop``. If the code being tested requires a - global `.IOLoop`, subclasses should override `get_new_ioloop` to return it. + reused global `.IOLoop`, subclasses should override `get_new_ioloop` to return it, + although this is deprecated as of Tornado 6.3. The `.IOLoop`'s ``start`` and ``stop`` methods should not be called directly. Instead, use `self.stop ` and `self.wait @@ -162,17 +163,6 @@ class AsyncTestCase(unittest.TestCase): response = self.wait() # Test contents of response self.assertIn("FriendFeed", response.body) - - .. 
deprecated:: 6.2 - - AsyncTestCase and AsyncHTTPTestCase are deprecated due to changes - in future versions of Python (after 3.10). The interfaces used - in this class are incompatible with the deprecation and intended - removal of certain methods related to the idea of a "current" - event loop while no event loop is actually running. Use - `unittest.IsolatedAsyncioTestCase` instead. Note that this class - does not emit DeprecationWarnings until better migration guidance - can be provided. """ def __init__(self, methodName: str = "runTest") -> None: @@ -193,49 +183,22 @@ class AsyncTestCase(unittest.TestCase): self._test_generator = None # type: Optional[Union[Generator, Coroutine]] def setUp(self) -> None: - setup_with_context_manager(self, warnings.catch_warnings()) - warnings.filterwarnings( - "ignore", - message="There is no current event loop", - category=DeprecationWarning, - module=r"tornado\..*", - ) + py_ver = sys.version_info + if ((3, 10, 0) <= py_ver < (3, 10, 9)) or ((3, 11, 0) <= py_ver <= (3, 11, 1)): + # Early releases in the Python 3.10 and 3.1 series had deprecation + # warnings that were later reverted; we must suppress them here. + setup_with_context_manager(self, warnings.catch_warnings()) + warnings.filterwarnings( + "ignore", + message="There is no current event loop", + category=DeprecationWarning, + module=r"tornado\..*", + ) super().setUp() - # NOTE: this code attempts to navigate deprecation warnings introduced - # in Python 3.10. The idea of an implicit current event loop is - # deprecated in that version, with the intention that tests like this - # explicitly create a new event loop and run on it. However, other - # packages such as pytest-asyncio (as of version 0.16.0) still rely on - # the implicit current event loop and we want to be compatible with them - # (even when run on 3.10, but not, of course, on the future version of - # python that removes the get/set_event_loop methods completely). - # - # Deprecation warnings were introduced inconsistently: - # asyncio.get_event_loop warns, but - # asyncio.get_event_loop_policy().get_event_loop does not. Similarly, - # none of the set_event_loop methods warn, although comments on - # https://bugs.python.org/issue39529 indicate that they are also - # intended for future removal. - # - # Therefore, we first attempt to access the event loop with the - # (non-warning) policy method, and if it fails, fall back to creating a - # new event loop. We do not have effective test coverage of the - # new event loop case; this will have to be watched when/if - # get_event_loop is actually removed. 
- self.should_close_asyncio_loop = False - try: - self.asyncio_loop = asyncio.get_event_loop_policy().get_event_loop() - except Exception: - self.asyncio_loop = asyncio.new_event_loop() - self.should_close_asyncio_loop = True - - async def get_loop() -> IOLoop: - return self.get_new_ioloop() - - self.io_loop = self.asyncio_loop.run_until_complete(get_loop()) - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - self.io_loop.make_current() + if type(self).get_new_ioloop is not AsyncTestCase.get_new_ioloop: + warnings.warn("get_new_ioloop is deprecated", DeprecationWarning) + self.io_loop = self.get_new_ioloop() + asyncio.set_event_loop(self.io_loop.asyncio_loop) # type: ignore[attr-defined] def tearDown(self) -> None: # Native coroutines tend to produce warnings if they're not @@ -270,17 +233,13 @@ class AsyncTestCase(unittest.TestCase): # Clean up Subprocess, so it can be used again with a new ioloop. Subprocess.uninitialize() - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - self.io_loop.clear_current() + asyncio.set_event_loop(None) if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS): # Try to clean up any file descriptors left open in the ioloop. # This avoids leaks, especially when tests are run repeatedly # in the same process with autoreload (because curl does not # set FD_CLOEXEC on its file descriptors) self.io_loop.close(all_fds=True) - if self.should_close_asyncio_loop: - self.asyncio_loop.close() super().tearDown() # In case an exception escaped or the StackContext caught an exception # when there wasn't a wait() to re-raise it, do so here. @@ -298,6 +257,9 @@ class AsyncTestCase(unittest.TestCase): singletons using the default `.IOLoop`) or if a per-test event loop is being provided by another system (such as ``pytest-asyncio``). + + .. deprecated:: 6.3 + This method will be removed in Tornado 7.0. """ return IOLoop(make_current=False) @@ -435,10 +397,6 @@ class AsyncHTTPTestCase(AsyncTestCase): like ``http_client.fetch()``, into a synchronous operation. If you need to do other asynchronous operations in tests, you'll probably need to use ``stop()`` and ``wait()`` yourself. - - .. deprecated:: 6.2 - `AsyncTestCase` and `AsyncHTTPTestCase` are deprecated due to changes - in Python 3.10; see comments on `AsyncTestCase` for more details. """ def setUp(self) -> None: @@ -672,7 +630,7 @@ def gen_test( # noqa: F811 if self._test_generator is not None and getattr( self._test_generator, "cr_running", True ): - self._test_generator.throw(type(e), e) + self._test_generator.throw(e) # In case the test contains an overly broad except # clause, we may get back here. # Coroutine was stopped or didn't raise a useful stack trace, @@ -724,28 +682,37 @@ class ExpectLog(logging.Filter): ) -> None: """Constructs an ExpectLog context manager. - :param logger: Logger object (or name of logger) to watch. Pass - an empty string to watch the root logger. - :param regex: Regular expression to match. Any log entries on - the specified logger that match this regex will be suppressed. - :param required: If true, an exception will be raised if the end of - the ``with`` statement is reached without matching any log entries. + :param logger: Logger object (or name of logger) to watch. Pass an + empty string to watch the root logger. + :param regex: Regular expression to match. Any log entries on the + specified logger that match this regex will be suppressed. 
+ :param required: If true, an exception will be raised if the end of the + ``with`` statement is reached without matching any log entries. :param level: A constant from the ``logging`` module indicating the expected log level. If this parameter is provided, only log messages at this level will be considered to match. Additionally, the - supplied ``logger`` will have its level adjusted if necessary - (for the duration of the ``ExpectLog`` to enable the expected - message. + supplied ``logger`` will have its level adjusted if necessary (for + the duration of the ``ExpectLog`` to enable the expected message. .. versionchanged:: 6.1 Added the ``level`` parameter. + + .. deprecated:: 6.3 + In Tornado 7.0, only ``WARNING`` and higher logging levels will be + matched by default. To match ``INFO`` and lower levels, the ``level`` + argument must be used. This is changing to minimize differences + between ``tornado.testing.main`` (which enables ``INFO`` logs by + default) and most other test runners (including those in IDEs) + which have ``INFO`` logs disabled by default. """ if isinstance(logger, basestring_type): logger = logging.getLogger(logger) self.logger = logger self.regex = re.compile(regex) self.required = required - self.matched = False + # matched and deprecated_level_matched are a counter for the respective event. + self.matched = 0 + self.deprecated_level_matched = 0 self.logged_stack = False self.level = level self.orig_level = None # type: Optional[int] @@ -755,13 +722,20 @@ class ExpectLog(logging.Filter): self.logged_stack = True message = record.getMessage() if self.regex.match(message): + if self.level is None and record.levelno < logging.WARNING: + # We're inside the logging machinery here so generating a DeprecationWarning + # here won't be reported cleanly (if warnings-as-errors is enabled, the error + # just gets swallowed by the logging module), and even if it were it would + # have the wrong stack trace. Just remember this fact and report it in + # __exit__ instead. + self.deprecated_level_matched += 1 if self.level is not None and record.levelno != self.level: app_log.warning( "Got expected log message %r at unexpected level (%s vs %s)" % (message, logging.getLevelName(self.level), record.levelname) ) return True - self.matched = True + self.matched += 1 return False return True @@ -783,6 +757,15 @@ class ExpectLog(logging.Filter): self.logger.removeFilter(self) if not typ and self.required and not self.matched: raise Exception("did not get expected log message") + if ( + not typ + and self.required + and (self.deprecated_level_matched >= self.matched) + ): + warnings.warn( + "ExpectLog matched at INFO or below without level argument", + DeprecationWarning, + ) # From https://nedbatchelder.com/blog/201508/using_context_managers_in_test_setup.html diff --git a/lib/tornado/web.py b/lib/tornado/web.py index cd6a81b4..18634d89 100644 --- a/lib/tornado/web.py +++ b/lib/tornado/web.py @@ -23,7 +23,7 @@ Here is a simple "Hello, world" example app: .. testcode:: import asyncio - import tornado.web + import tornado class MainHandler(tornado.web.RequestHandler): def get(self): @@ -166,7 +166,7 @@ May be overridden by passing a ``version`` keyword argument. """ DEFAULT_SIGNED_VALUE_MIN_VERSION = 1 -"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`. +"""The oldest signed value accepted by `.RequestHandler.get_signed_cookie`. May be overridden by passing a ``min_version`` keyword argument. 
@@ -210,7 +210,7 @@ class RequestHandler(object): self, application: "Application", request: httputil.HTTPServerRequest, - **kwargs: Any + **kwargs: Any, ) -> None: super().__init__() @@ -603,21 +603,28 @@ class RequestHandler(object): expires: Optional[Union[float, Tuple, datetime.datetime]] = None, path: str = "/", expires_days: Optional[float] = None, - **kwargs: Any + # Keyword-only args start here for historical reasons. + *, + max_age: Optional[int] = None, + httponly: bool = False, + secure: bool = False, + samesite: Optional[str] = None, ) -> None: """Sets an outgoing cookie name/value with the given options. Newly-set cookies are not immediately visible via `get_cookie`; they are not present until the next request. - expires may be a numeric timestamp as returned by `time.time`, - a time tuple as returned by `time.gmtime`, or a - `datetime.datetime` object. + Most arguments are passed directly to `http.cookies.Morsel` directly. + See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie + for more information. + + ``expires`` may be a numeric timestamp as returned by `time.time`, + a time tuple as returned by `time.gmtime`, or a + `datetime.datetime` object. ``expires_days`` is provided as a convenience + to set an expiration time in days from today (if both are set, ``expires`` + is used). - Additional keyword arguments are set on the cookies.Morsel - directly. - See https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel - for available attributes. """ # The cookie library only accepts type str, in both python 2 and 3 name = escape.native_str(name) @@ -641,56 +648,82 @@ class RequestHandler(object): morsel["expires"] = httputil.format_timestamp(expires) if path: morsel["path"] = path - for k, v in kwargs.items(): - if k == "max_age": - k = "max-age" + if max_age: + # Note change from _ to -. + morsel["max-age"] = str(max_age) + if httponly: + # Note that SimpleCookie ignores the value here. The presense of an + # httponly (or secure) key is treated as true. + morsel["httponly"] = True + if secure: + morsel["secure"] = True + if samesite: + morsel["samesite"] = samesite - # skip falsy values for httponly and secure flags because - # SimpleCookie sets them regardless - if k in ["httponly", "secure"] and not v: - continue - - morsel[k] = v - - def clear_cookie( - self, name: str, path: str = "/", domain: Optional[str] = None - ) -> None: + def clear_cookie(self, name: str, **kwargs: Any) -> None: """Deletes the cookie with the given name. - Due to limitations of the cookie protocol, you must pass the same - path and domain to clear a cookie as were used when that cookie - was set (but there is no way to find out on the server side - which values were used for a given cookie). + This method accepts the same arguments as `set_cookie`, except for + ``expires`` and ``max_age``. Clearing a cookie requires the same + ``domain`` and ``path`` arguments as when it was set. In some cases the + ``samesite`` and ``secure`` arguments are also required to match. Other + arguments are ignored. Similar to `set_cookie`, the effect of this method will not be seen until the following request. + + .. versionchanged:: 6.3 + + Now accepts all keyword arguments that ``set_cookie`` does. + The ``samesite`` and ``secure`` flags have recently become + required for clearing ``samesite="none"`` cookies. 
""" + for excluded_arg in ["expires", "max_age"]: + if excluded_arg in kwargs: + raise TypeError( + f"clear_cookie() got an unexpected keyword argument '{excluded_arg}'" + ) expires = datetime.datetime.utcnow() - datetime.timedelta(days=365) - self.set_cookie(name, value="", path=path, expires=expires, domain=domain) + self.set_cookie(name, value="", expires=expires, **kwargs) - def clear_all_cookies(self, path: str = "/", domain: Optional[str] = None) -> None: - """Deletes all the cookies the user sent with this request. + def clear_all_cookies(self, **kwargs: Any) -> None: + """Attempt to delete all the cookies the user sent with this request. - See `clear_cookie` for more information on the path and domain - parameters. + See `clear_cookie` for more information on keyword arguments. Due to + limitations of the cookie protocol, it is impossible to determine on the + server side which values are necessary for the ``domain``, ``path``, + ``samesite``, or ``secure`` arguments, this method can only be + successful if you consistently use the same values for these arguments + when setting cookies. - Similar to `set_cookie`, the effect of this method will not be - seen until the following request. + Similar to `set_cookie`, the effect of this method will not be seen + until the following request. .. versionchanged:: 3.2 Added the ``path`` and ``domain`` parameters. + + .. versionchanged:: 6.3 + + Now accepts all keyword arguments that ``set_cookie`` does. + + .. deprecated:: 6.3 + + The increasingly complex rules governing cookies have made it + impossible for a ``clear_all_cookies`` method to work reliably + since all we know about cookies are their names. Applications + should generally use ``clear_cookie`` one at a time instead. """ for name in self.request.cookies: - self.clear_cookie(name, path=path, domain=domain) + self.clear_cookie(name, **kwargs) - def set_secure_cookie( + def set_signed_cookie( self, name: str, value: Union[str, bytes], expires_days: Optional[float] = 30, version: Optional[int] = None, - **kwargs: Any + **kwargs: Any, ) -> None: """Signs and timestamps a cookie so it cannot be forged. @@ -698,11 +731,11 @@ class RequestHandler(object): to use this method. It should be a long, random sequence of bytes to be used as the HMAC secret for the signature. - To read a cookie set with this method, use `get_secure_cookie()`. + To read a cookie set with this method, use `get_signed_cookie()`. Note that the ``expires_days`` parameter sets the lifetime of the cookie in the browser, but is independent of the ``max_age_days`` - parameter to `get_secure_cookie`. + parameter to `get_signed_cookie`. A value of None limits the lifetime to the current browser session. Secure cookies may contain arbitrary byte values, not just unicode @@ -715,22 +748,30 @@ class RequestHandler(object): Added the ``version`` argument. Introduced cookie version 2 and made it the default. + + .. versionchanged:: 6.3 + + Renamed from ``set_secure_cookie`` to ``set_signed_cookie`` to + avoid confusion with other uses of "secure" in cookie attributes + and prefixes. The old name remains as an alias. """ self.set_cookie( name, self.create_signed_value(name, value, version=version), expires_days=expires_days, - **kwargs + **kwargs, ) + set_secure_cookie = set_signed_cookie + def create_signed_value( self, name: str, value: Union[str, bytes], version: Optional[int] = None ) -> bytes: """Signs and timestamps a string so it cannot be forged. 
- Normally used via set_secure_cookie, but provided as a separate + Normally used via set_signed_cookie, but provided as a separate method for non-cookie uses. To decode a value not stored - as a cookie use the optional value argument to get_secure_cookie. + as a cookie use the optional value argument to get_signed_cookie. .. versionchanged:: 3.2.1 @@ -749,7 +790,7 @@ class RequestHandler(object): secret, name, value, version=version, key_version=key_version ) - def get_secure_cookie( + def get_signed_cookie( self, name: str, value: Optional[str] = None, @@ -763,12 +804,19 @@ class RequestHandler(object): Similar to `get_cookie`, this method only returns cookies that were present in the request. It does not see outgoing cookies set by - `set_secure_cookie` in this handler. + `set_signed_cookie` in this handler. .. versionchanged:: 3.2.1 Added the ``min_version`` argument. Introduced cookie version 2; both versions 1 and 2 are accepted by default. + + .. versionchanged:: 6.3 + + Renamed from ``get_secure_cookie`` to ``get_signed_cookie`` to + avoid confusion with other uses of "secure" in cookie attributes + and prefixes. The old name remains as an alias. + """ self.require_setting("cookie_secret", "secure cookies") if value is None: @@ -781,12 +829,22 @@ class RequestHandler(object): min_version=min_version, ) - def get_secure_cookie_key_version( + get_secure_cookie = get_signed_cookie + + def get_signed_cookie_key_version( self, name: str, value: Optional[str] = None ) -> Optional[int]: """Returns the signing key version of the secure cookie. The version is returned as int. + + .. versionchanged:: 6.3 + + Renamed from ``get_secure_cookie_key_version`` to + ``set_signed_cookie_key_version`` to avoid confusion with other + uses of "secure" in cookie attributes and prefixes. The old name + remains as an alias. + """ self.require_setting("cookie_secret", "secure cookies") if value is None: @@ -795,6 +853,8 @@ class RequestHandler(object): return None return get_signature_key_version(value) + get_secure_cookie_key_version = get_signed_cookie_key_version + def redirect( self, url: str, permanent: bool = False, status: Optional[int] = None ) -> None: @@ -1321,7 +1381,7 @@ class RequestHandler(object): and is cached for future access:: def get_current_user(self): - user_cookie = self.get_secure_cookie("user") + user_cookie = self.get_signed_cookie("user") if user_cookie: return json.loads(user_cookie) return None @@ -1331,7 +1391,7 @@ class RequestHandler(object): @gen.coroutine def prepare(self): - user_id_cookie = self.get_secure_cookie("user_id") + user_id_cookie = self.get_signed_cookie("user_id") if user_id_cookie: self.current_user = yield load_user(user_id_cookie) @@ -1643,7 +1703,7 @@ class RequestHandler(object): # Find all weak and strong etag values from If-None-Match header # because RFC 7232 allows multiple etag values in a single header. 
etags = re.findall( - br'\*|(?:W/)?"[^"]*"', utf8(self.request.headers.get("If-None-Match", "")) + rb'\*|(?:W/)?"[^"]*"', utf8(self.request.headers.get("If-None-Match", "")) ) if not computed_etag or not etags: return False @@ -1676,20 +1736,16 @@ class RequestHandler(object): ) # If XSRF cookies are turned on, reject form submissions without # the proper cookie - if ( - self.request.method - not in ( - "GET", - "HEAD", - "OPTIONS", - ) - and self.application.settings.get("xsrf_cookies") - ): + if self.request.method not in ( + "GET", + "HEAD", + "OPTIONS", + ) and self.application.settings.get("xsrf_cookies"): self.check_xsrf_cookie() result = self.prepare() if result is not None: - result = await result + result = await result # type: ignore if self._prepared_future is not None: # Tell the Application we've finished with prepare() # and are ready for the body to arrive. @@ -1848,7 +1904,7 @@ def stream_request_body(cls: Type[_RequestHandlerType]) -> Type[_RequestHandlerT * The regular HTTP method (``post``, ``put``, etc) will be called after the entire body has been read. - See the `file receiver demo `_ + See the `file receiver demo `_ for example usage. """ # noqa: E501 if not issubclass(cls, RequestHandler): @@ -2046,7 +2102,7 @@ class Application(ReversibleRouter): handlers: Optional[_RuleList] = None, default_host: Optional[str] = None, transforms: Optional[List[Type["OutputTransform"]]] = None, - **settings: Any + **settings: Any, ) -> None: if transforms is None: self.transforms = [] # type: List[Type[OutputTransform]] @@ -2106,7 +2162,7 @@ class Application(ReversibleRouter): backlog: int = tornado.netutil._DEFAULT_BACKLOG, flags: Optional[int] = None, reuse_port: bool = False, - **kwargs: Any + **kwargs: Any, ) -> HTTPServer: """Starts an HTTP server for this application on the given port. @@ -2393,7 +2449,7 @@ class HTTPError(Exception): status_code: int = 500, log_message: Optional[str] = None, *args: Any, - **kwargs: Any + **kwargs: Any, ) -> None: self.status_code = status_code self.log_message = log_message @@ -3441,7 +3497,7 @@ def create_signed_value( # A leading version number in decimal # with no leading zeros, followed by a pipe. 
-_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$") +_signed_value_version_re = re.compile(rb"^([1-9][0-9]*)\|(.*)$") def _get_version(value: bytes) -> int: diff --git a/lib/tornado/websocket.py b/lib/tornado/websocket.py index 82c29d84..1d42e10b 100644 --- a/lib/tornado/websocket.py +++ b/lib/tornado/websocket.py @@ -23,7 +23,6 @@ import hashlib import os import sys import struct -import tornado.escape import tornado.web from urllib.parse import urlparse import zlib @@ -34,6 +33,7 @@ from tornado import gen, httpclient, httputil from tornado.ioloop import IOLoop, PeriodicCallback from tornado.iostream import StreamClosedError, IOStream from tornado.log import gen_log, app_log +from tornado.netutil import Resolver from tornado import simple_httpclient from tornado.queues import Queue from tornado.tcpclient import TCPClient @@ -822,7 +822,7 @@ class WebSocketProtocol13(WebSocketProtocol): self._masked_frame = None self._frame_mask = None # type: Optional[bytes] self._frame_length = None - self._fragmented_message_buffer = None # type: Optional[bytes] + self._fragmented_message_buffer = None # type: Optional[bytearray] self._fragmented_message_opcode = None self._waiting = None # type: object self._compression_options = params.compression_options @@ -1177,10 +1177,10 @@ class WebSocketProtocol13(WebSocketProtocol): # nothing to continue self._abort() return - self._fragmented_message_buffer += data + self._fragmented_message_buffer.extend(data) if is_final_frame: opcode = self._fragmented_message_opcode - data = self._fragmented_message_buffer + data = bytes(self._fragmented_message_buffer) self._fragmented_message_buffer = None else: # start of new data message if self._fragmented_message_buffer is not None: @@ -1189,7 +1189,7 @@ class WebSocketProtocol13(WebSocketProtocol): return if not is_final_frame: self._fragmented_message_opcode = opcode - self._fragmented_message_buffer = data + self._fragmented_message_buffer = bytearray(data) if is_final_frame: handled_future = self._handle_message(opcode, data) @@ -1362,6 +1362,7 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): ping_timeout: Optional[float] = None, max_message_size: int = _default_max_message_size, subprotocols: Optional[List[str]] = [], + resolver: Optional[Resolver] = None, ) -> None: self.connect_future = Future() # type: Future[WebSocketClientConnection] self.read_queue = Queue(1) # type: Queue[Union[None, str, bytes]] @@ -1402,7 +1403,7 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): # Websocket connection is currently unable to follow redirects request.follow_redirects = False - self.tcp_client = TCPClient() + self.tcp_client = TCPClient(resolver=resolver) super().__init__( None, request, diff --git a/lib/tornado/wsgi.py b/lib/tornado/wsgi.py index c60f152d..32641be3 100644 --- a/lib/tornado/wsgi.py +++ b/lib/tornado/wsgi.py @@ -27,12 +27,15 @@ container. """ -import sys +import concurrent.futures from io import BytesIO import tornado +import sys +from tornado.concurrent import dummy_executor from tornado import escape from tornado import httputil +from tornado.ioloop import IOLoop from tornado.log import access_log from typing import List, Tuple, Optional, Callable, Any, Dict, Text @@ -54,20 +57,28 @@ def to_wsgi_str(s: bytes) -> str: class WSGIContainer(object): - r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server. + r"""Makes a WSGI-compatible application runnable on Tornado's HTTP server. .. 
warning:: WSGI is a *synchronous* interface, while Tornado's concurrency model - is based on single-threaded asynchronous execution. This means that - running a WSGI app with Tornado's `WSGIContainer` is *less scalable* - than running the same app in a multi-threaded WSGI server like - ``gunicorn`` or ``uwsgi``. Use `WSGIContainer` only when there are - benefits to combining Tornado and WSGI in the same process that - outweigh the reduced scalability. + is based on single-threaded *asynchronous* execution. Many of Tornado's + distinguishing features are not available in WSGI mode, including efficient + long-polling and websockets. The primary purpose of `WSGIContainer` is + to support both WSGI applications and native Tornado ``RequestHandlers`` in + a single process. WSGI-only applications are likely to be better off + with a dedicated WSGI server such as ``gunicorn`` or ``uwsgi``. - Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to - run it. For example:: + Wrap a WSGI application in a `WSGIContainer` to make it implement the Tornado + `.HTTPServer` ``request_callback`` interface. The `WSGIContainer` object can + then be passed to classes from the `tornado.routing` module, + `tornado.web.FallbackHandler`, or to `.HTTPServer` directly. + + This class is intended to let other frameworks (Django, Flask, etc) + run on the Tornado HTTP server and I/O loop. + + Realistic usage will be more complicated, but the simplest possible example uses a + hand-written WSGI application with `.HTTPServer`:: def simple_app(environ, start_response): status = "200 OK" @@ -83,18 +94,46 @@ class WSGIContainer(object): asyncio.run(main()) - This class is intended to let other frameworks (Django, web.py, etc) - run on the Tornado HTTP server and I/O loop. + The recommended pattern is to use the `tornado.routing` module to set up routing + rules between your WSGI application and, typically, a `tornado.web.Application`. + Alternatively, `tornado.web.Application` can be used as the top-level router + and `tornado.web.FallbackHandler` can embed a `WSGIContainer` within it. - The `tornado.web.FallbackHandler` class is often useful for mixing - Tornado and WSGI apps in the same server. See - https://github.com/bdarnell/django-tornado-demo for a complete example. + If the ``executor`` argument is provided, the WSGI application will be executed + on that executor. This must be an instance of `concurrent.futures.Executor`, + typically a ``ThreadPoolExecutor`` (``ProcessPoolExecutor`` is not supported). + If no ``executor`` is given, the application will run on the event loop thread in + Tornado 6.3; this will change to use an internal thread pool by default in + Tornado 7.0. + + .. warning:: + By default, the WSGI application is executed on the event loop's thread. This + limits the server to one request at a time (per process), making it less scalable + than most other WSGI servers. It is therefore highly recommended that you pass + a ``ThreadPoolExecutor`` when constructing the `WSGIContainer`, after verifying + that your application is thread-safe. The default will change to use a + ``ThreadPoolExecutor`` in Tornado 7.0. + + .. versionadded:: 6.3 + The ``executor`` parameter. + + .. deprecated:: 6.3 + The default behavior of running the WSGI application on the event loop thread + is deprecated and will change in Tornado 7.0 to use a thread pool by default. 
""" - def __init__(self, wsgi_application: "WSGIAppType") -> None: + def __init__( + self, + wsgi_application: "WSGIAppType", + executor: Optional[concurrent.futures.Executor] = None, + ) -> None: self.wsgi_application = wsgi_application + self.executor = dummy_executor if executor is None else executor def __call__(self, request: httputil.HTTPServerRequest) -> None: + IOLoop.current().spawn_callback(self.handle_request, request) + + async def handle_request(self, request: httputil.HTTPServerRequest) -> None: data = {} # type: Dict[str, Any] response = [] # type: List[bytes] @@ -113,15 +152,33 @@ class WSGIContainer(object): data["headers"] = headers return response.append - app_response = self.wsgi_application( - WSGIContainer.environ(request), start_response + loop = IOLoop.current() + app_response = await loop.run_in_executor( + self.executor, + self.wsgi_application, + self.environ(request), + start_response, ) try: - response.extend(app_response) - body = b"".join(response) + app_response_iter = iter(app_response) + + def next_chunk() -> Optional[bytes]: + try: + return next(app_response_iter) + except StopIteration: + # StopIteration is special and is not allowed to pass through + # coroutines normally. + return None + + while True: + chunk = await loop.run_in_executor(self.executor, next_chunk) + if chunk is None: + break + response.append(chunk) finally: if hasattr(app_response, "close"): app_response.close() # type: ignore + body = b"".join(response) if not data: raise Exception("WSGI app did not call start_response") @@ -147,9 +204,12 @@ class WSGIContainer(object): request.connection.finish() self._log(status_code, request) - @staticmethod - def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]: - """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.""" + def environ(self, request: httputil.HTTPServerRequest) -> Dict[Text, Any]: + """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment. + + .. versionchanged:: 6.3 + No longer a static method. + """ hostport = request.host.split(":") if len(hostport) == 2: host = hostport[0] @@ -172,7 +232,7 @@ class WSGIContainer(object): "wsgi.url_scheme": request.protocol, "wsgi.input": BytesIO(escape.utf8(request.body)), "wsgi.errors": sys.stderr, - "wsgi.multithread": False, + "wsgi.multithread": self.executor is not dummy_executor, "wsgi.multiprocess": True, "wsgi.run_once": False, } diff --git a/sickgear/webserve.py b/sickgear/webserve.py index 6ccdec7f..5b83341e 100644 --- a/sickgear/webserve.py +++ b/sickgear/webserve.py @@ -320,7 +320,7 @@ class BaseHandler(RouteHandler): def get_current_user(self): if sickgear.WEB_USERNAME or sickgear.WEB_PASSWORD: - return self.get_secure_cookie('sickgear-session-%s' % helpers.md5_for_text(sickgear.WEB_PORT)) + return self.get_signed_cookie('sickgear-session-%s' % helpers.md5_for_text(sickgear.WEB_PORT)) return True def get_image(self, image): @@ -401,7 +401,7 @@ class LoginHandler(BaseHandler): httponly=True) if sickgear.ENABLE_HTTPS: params.update(dict(secure=True)) - self.set_secure_cookie('sickgear-session-%s' % helpers.md5_for_text(sickgear.WEB_PORT), + self.set_signed_cookie('sickgear-session-%s' % helpers.md5_for_text(sickgear.WEB_PORT), sickgear.COOKIE_SECRET, **params) self.redirect(self.get_argument('next', '/home/')) else: From ae4173e8c0498c347571ecbd365d04e4140bbcf9 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Wed, 8 Mar 2023 13:44:20 +0000 Subject: [PATCH 13/21] Change py2 unicode into f-strings or simple strings where appropriate. 
Change use specific logger functions for debug, warning, error. --- _cleaner.py | 6 +- lib/api_trakt/trakt.py | 22 +- lib/api_tvdb/tvdb_api.py | 26 +- lib/api_tvdb/tvdb_ui.py | 4 +- lib/certgen.py | 10 +- lib/plex/plex.py | 2 +- lib/sg_helpers.py | 53 ++-- sickgear.py | 75 +++-- sickgear/__init__.py | 24 +- sickgear/anime.py | 30 +- sickgear/auto_post_processer.py | 7 +- sickgear/browser.py | 2 +- sickgear/clients/deluge.py | 15 +- sickgear/clients/download_station.py | 12 +- sickgear/clients/generic.py | 47 ++- sickgear/clients/qbittorrent.py | 16 +- sickgear/clients/rtorrent.py | 6 +- sickgear/clients/transmission.py | 4 +- sickgear/common.py | 10 +- sickgear/config.py | 58 ++-- sickgear/databases/mainDB.py | 206 +++++++------ sickgear/db.py | 58 ++-- sickgear/failedProcessor.py | 25 +- sickgear/failed_history.py | 20 +- sickgear/generic_queue.py | 14 +- sickgear/helpers.py | 104 +++---- sickgear/image_cache.py | 54 ++-- sickgear/indexermapper.py | 2 +- sickgear/logger.py | 2 +- sickgear/metadata/generic.py | 133 ++++----- sickgear/metadata/helpers.py | 6 +- sickgear/metadata/kodi.py | 28 +- sickgear/metadata/mede8er.py | 20 +- sickgear/metadata/mediabrowser.py | 25 +- sickgear/metadata/tivo.py | 19 +- sickgear/metadata/wdtv.py | 8 +- sickgear/metadata/xbmc_12plus.py | 22 +- sickgear/name_parser/parser.py | 24 +- sickgear/naming.py | 26 +- sickgear/network_timezones.py | 29 +- sickgear/notifiers/boxcar2.py | 4 +- sickgear/notifiers/emailnotify.py | 18 +- sickgear/notifiers/emby.py | 26 +- sickgear/notifiers/generic.py | 8 +- sickgear/notifiers/growl.py | 2 +- sickgear/notifiers/kodi.py | 69 +++-- sickgear/notifiers/libnotify.py | 14 +- sickgear/notifiers/nmj.py | 36 +-- sickgear/notifiers/nmjv2.py | 22 +- sickgear/notifiers/plex.py | 32 +- sickgear/notifiers/prowl.py | 2 +- sickgear/notifiers/pushalot.py | 4 +- sickgear/notifiers/pushbullet.py | 2 +- sickgear/notifiers/pytivo.py | 10 +- sickgear/notifiers/synoindex.py | 6 +- sickgear/notifiers/synologynotifier.py | 6 +- sickgear/notifiers/telegram.py | 4 +- sickgear/notifiers/xbmc.py | 94 +++--- sickgear/nzbSplitter.py | 32 +- sickgear/nzbget.py | 22 +- sickgear/postProcessor.py | 234 +++++++-------- sickgear/processTV.py | 131 ++++----- sickgear/properFinder.py | 43 ++- sickgear/providers/__init__.py | 5 +- sickgear/providers/alpharatio.py | 2 +- sickgear/providers/bithdtv.py | 4 +- sickgear/providers/blutopia.py | 6 +- sickgear/providers/btn.py | 13 +- sickgear/providers/eztv.py | 2 +- sickgear/providers/fano.py | 2 +- sickgear/providers/filelist.py | 2 +- sickgear/providers/filesharingtalk.py | 4 +- sickgear/providers/funfile.py | 2 +- sickgear/providers/generic.py | 114 ++++---- sickgear/providers/hdbits.py | 6 +- sickgear/providers/hdspace.py | 2 +- sickgear/providers/hdtorrents.py | 2 +- sickgear/providers/iptorrents.py | 4 +- sickgear/providers/limetorrents.py | 4 +- sickgear/providers/magnetdl.py | 2 +- sickgear/providers/morethan.py | 2 +- sickgear/providers/ncore.py | 2 +- sickgear/providers/nebulance.py | 2 +- sickgear/providers/newznab.py | 24 +- sickgear/providers/nyaa.py | 2 +- sickgear/providers/omgwtfnzbs.py | 11 +- sickgear/providers/pretome.py | 2 +- sickgear/providers/privatehd.py | 6 +- sickgear/providers/ptf.py | 4 +- sickgear/providers/rarbg.py | 2 +- sickgear/providers/revtt.py | 2 +- sickgear/providers/rsstorrent.py | 2 +- sickgear/providers/scenehd.py | 4 +- sickgear/providers/scenetime.py | 4 +- sickgear/providers/shazbat.py | 2 +- sickgear/providers/showrss.py | 2 +- sickgear/providers/snowfl.py | 2 +- 
sickgear/providers/speedapp.py | 2 +- sickgear/providers/speedcd.py | 4 +- sickgear/providers/thepiratebay.py | 6 +- sickgear/providers/torlock.py | 2 +- sickgear/providers/torrentday.py | 2 +- sickgear/providers/torrenting.py | 4 +- sickgear/providers/torrentleech.py | 2 +- sickgear/providers/tvchaosuk.py | 2 +- sickgear/providers/xspeeds.py | 4 +- sickgear/rssfeeds.py | 6 +- sickgear/sab.py | 18 +- sickgear/scene_exceptions.py | 58 ++-- sickgear/scene_numbering.py | 25 +- sickgear/scheduler.py | 8 +- sickgear/search.py | 148 +++++----- sickgear/search_backlog.py | 24 +- sickgear/search_queue.py | 54 ++-- sickgear/sgdatetime.py | 2 +- sickgear/show_name_helpers.py | 24 +- sickgear/show_queue.py | 99 +++---- sickgear/show_updater.py | 56 ++-- sickgear/subtitles.py | 20 +- sickgear/tv.py | 386 ++++++++++++------------- sickgear/tv_base.py | 4 +- sickgear/tvcache.py | 19 +- sickgear/version_checker.py | 87 +++--- sickgear/watchedstate_queue.py | 2 +- sickgear/webapi.py | 42 ++- sickgear/webserve.py | 228 +++++++-------- sickgear/webserveInit.py | 11 +- tests/migration_tests.py | 20 +- tests/name_parser_tests.py | 10 +- tests/newznab_tests.py | 12 +- tests/scene_helpers_tests.py | 4 +- tests/test_lib.py | 4 +- tests/xem_tests.py | 4 +- 133 files changed, 1799 insertions(+), 1930 deletions(-) diff --git a/_cleaner.py b/_cleaner.py index 8c6eac00..60b21232 100644 --- a/_cleaner.py +++ b/_cleaner.py @@ -125,7 +125,7 @@ for cleaned_path, test_path, dir_list in cleanups: pass with io.open(cleaned_file, 'w+', encoding='utf-8') as fp: - fp.write(u'This file exists to prevent a rerun delete of *.pyc, *.pyo files') + fp.write('This file exists to prevent a rerun delete of *.pyc, *.pyo files') fp.flush() os.fsync(fp.fileno()) @@ -166,10 +166,10 @@ if not os.path.isfile(cleaned_file) or os.path.exists(test): swap_name = cleaned_file cleaned_file = danger_output danger_output = swap_name - msg = u'Failed (permissions?) to delete file(s). You must manually delete:\r\n%s' % '\r\n'.join(bad_files) + msg = 'Failed (permissions?) to delete file(s). You must manually delete:\r\n%s' % '\r\n'.join(bad_files) print(msg) else: - msg = u'This file exists to prevent a rerun delete of dead lib/html5lib files' + msg = 'This file exists to prevent a rerun delete of dead lib/html5lib files' with io.open(cleaned_file, 'w+', encoding='utf-8') as fp: fp.write(msg) diff --git a/lib/api_trakt/trakt.py b/lib/api_trakt/trakt.py index 663d944f..fed70438 100644 --- a/lib/api_trakt/trakt.py +++ b/lib/api_trakt/trakt.py @@ -277,7 +277,7 @@ class TraktAPI(object): code = getattr(e.response, 'status_code', None) if not code: if 'timed out' in ex(e): - log.warning(u'Timeout connecting to Trakt') + log.warning('Timeout connecting to Trakt') if count >= self.max_retrys: raise TraktTimeout() return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, @@ -285,12 +285,12 @@ class TraktAPI(object): # This is pretty much a fatal error if there is no status_code # It means there basically was no response at all else: - log.warning(u'Could not connect to Trakt. Error: %s' % ex(e)) + log.warning('Could not connect to Trakt. Error: %s' % ex(e)) raise TraktException('Could not connect to Trakt. 
Error: %s' % ex(e)) elif 502 == code: # Retry the request, Cloudflare had a proxying issue - log.warning(u'Retrying Trakt api request: %s' % path) + log.warning(f'Retrying Trakt api request: {path}') if count >= self.max_retrys: raise TraktCloudFlareException() return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, @@ -303,7 +303,7 @@ class TraktAPI(object): return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, send_oauth=send_oauth, method=method) - log.warning(u'Unauthorized. Please check your Trakt settings') + log.warning('Unauthorized. Please check your Trakt settings') sickgear.TRAKT_ACCOUNTS[send_oauth].auth_failure() raise TraktAuthException() @@ -318,18 +318,18 @@ class TraktAPI(object): raise TraktAuthException() elif code in (500, 501, 503, 504, 520, 521, 522): if count >= self.max_retrys: - log.warning(u'Trakt may have some issues and it\'s unavailable. Code: %s' % code) + log.warning(f'Trakt may have some issues and it\'s unavailable. Code: {code}') raise TraktServerError(error_code=code) # http://docs.trakt.apiary.io/#introduction/status-codes - log.warning(u'Trakt may have some issues and it\'s unavailable. Trying again') + log.warning('Trakt may have some issues and it\'s unavailable. Trying again') return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, send_oauth=send_oauth, method=method) elif 404 == code: - log.warning(u'Trakt error (404) the resource does not exist: %s%s' % (url, path)) + log.warning(f'Trakt error (404) the resource does not exist: {url}{path}') raise TraktMethodNotExisting('Trakt error (404) the resource does not exist: %s%s' % (url, path)) elif 429 == code: if count >= self.max_retrys: - log.warning(u'Trakt replied with Rate-Limiting, maximum retries exceeded.') + log.warning('Trakt replied with Rate-Limiting, maximum retries exceeded.') raise TraktServerError(error_code=code) r_headers = getattr(e.response, 'headers', None) if None is not r_headers: @@ -356,14 +356,14 @@ class TraktAPI(object): 'revoked, does not match the redirection URI used in the authorization request,' ' or was issued to another client.') else: - log.error(u'Could not connect to Trakt. Code error: {0}'.format(code)) + log.error('Could not connect to Trakt. Code error: {0}'.format(code)) raise TraktException('Could not connect to Trakt. Code error: %s' % code) except ConnectionSkipException as e: log.warning('Connection is skipped') raise e except ValueError as e: - log.error(u'Value Error: %s' % ex(e)) - raise TraktValueError(u'Value Error: %s' % ex(e)) + log.error(f'Value Error: {ex(e)}') + raise TraktValueError(f'Value Error: {ex(e)}') except (BaseException, Exception) as e: log.error('Exception: %s' % ex(e)) raise TraktException('Could not connect to Trakt. 
Code error: %s' % ex(e)) diff --git a/lib/api_tvdb/tvdb_api.py b/lib/api_tvdb/tvdb_api.py index dc679ad5..c254d9a0 100644 --- a/lib/api_tvdb/tvdb_api.py +++ b/lib/api_tvdb/tvdb_api.py @@ -138,7 +138,7 @@ class Tvdb(TVInfoBase): """Create easy-to-use interface to name of season/episode name >> t = Tvdb() >> t['Scrubs'][1][24]['episodename'] - u'My Last Day' + 'My Last Day' """ map_languages = {} reverse_map_languages = {v: k for k, v in iteritems(map_languages)} @@ -201,7 +201,7 @@ class Tvdb(TVInfoBase): >> t = Tvdb(actors=True) >> t['scrubs']['actors'][0]['name'] - u'Zach Braff' + 'Zach Braff' custom_ui (tvdb_ui.BaseUI subclass): A callable subclass of tvdb_ui.BaseUI (overrides interactive option) @@ -580,7 +580,7 @@ class Tvdb(TVInfoBase): data_list.append(cr) resp['data'] = data_list return resp - return dict([(u'data', (None, resp)[isinstance(resp, string_types)])]) + return dict([('data', (None, resp)[isinstance(resp, string_types)])]) def _getetsrc(self, url, params=None, language=None, parse_json=False): """Loads a URL using caching @@ -1015,14 +1015,14 @@ class Tvdb(TVInfoBase): url_image = self._make_image(self.config['url_artworks'], image_data['data'][0]['filename']) url_thumb = self._make_image(self.config['url_artworks'], image_data['data'][0]['thumbnail']) self._set_show_data(sid, image_type, url_image) - self._set_show_data(sid, u'%s_thumb' % image_type, url_thumb) + self._set_show_data(sid, f'{image_type}_thumb', url_thumb) excluded_main_data = True # artwork found so prevent fallback self._parse_banners(sid, image_data['data']) self.shows[sid].__dict__[loaded_name] = True # fallback image thumbnail for none excluded_main_data if artwork is not found if not excluded_main_data and show_data['data'].get(image_type): - self._set_show_data(sid, u'%s_thumb' % image_type, + self._set_show_data(sid, f'{image_type}_thumb', re.sub(r'\.jpg$', '_t.jpg', show_data['data'][image_type], flags=re.I)) def _get_show_data(self, @@ -1067,11 +1067,11 @@ class Tvdb(TVInfoBase): else: show_data = {'data': {}} - for img_type, en_type, p_type in [(u'poster', 'posters_enabled', posters), - (u'banner', 'banners_enabled', banners), - (u'fanart', 'fanart_enabled', fanart), - (u'season', 'seasons_enabled', seasons), - (u'seasonwide', 'seasonwides_enabled', seasonwides)]: + for img_type, en_type, p_type in [('poster', 'posters_enabled', posters), + ('banner', 'banners_enabled', banners), + ('fanart', 'fanart_enabled', fanart), + ('season', 'seasons_enabled', seasons), + ('seasonwide', 'seasonwides_enabled', seasonwides)]: self._parse_images(sid, language, show_data, img_type, en_type, p_type) if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False): @@ -1175,9 +1175,9 @@ class Tvdb(TVInfoBase): else: page += 1 - ep_map_keys = {'absolutenumber': u'absolute_number', 'airedepisodenumber': u'episodenumber', - 'airedseason': u'seasonnumber', 'airedseasonid': u'seasonid', - 'dvdepisodenumber': u'dvd_episodenumber', 'dvdseason': u'dvd_season'} + ep_map_keys = {'absolutenumber': 'absolute_number', 'airedepisodenumber': 'episodenumber', + 'airedseason': 'seasonnumber', 'airedseasonid': 'seasonid', + 'dvdepisodenumber': 'dvd_episodenumber', 'dvdseason': 'dvd_season'} for cur_ep in episodes: if self.config['dvdorder']: diff --git a/lib/api_tvdb/tvdb_ui.py b/lib/api_tvdb/tvdb_ui.py index fae43830..19765764 100644 --- a/lib/api_tvdb/tvdb_ui.py +++ b/lib/api_tvdb/tvdb_ui.py @@ -17,8 +17,8 @@ It must have a method "select_series", this is passed a list of dicts, each 
dict contains the the keys "name" (human readable show name), and "sid" (the shows ID as on thetvdb.com). For example: -[{'name': u'Lost', 'sid': u'73739'}, - {'name': u'Lost Universe', 'sid': u'73181'}] +[{'name': 'Lost', 'sid': '73739'}, + {'name': 'Lost Universe', 'sid': '73181'}] The "select_series" method must return the appropriate dict, or it can raise tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show diff --git a/lib/certgen.py b/lib/certgen.py index 0f2b3788..82260b8a 100644 --- a/lib/certgen.py +++ b/lib/certgen.py @@ -77,7 +77,7 @@ def generate_key(key_size=4096, output_file='server.key'): # Ported from cryptography docs/x509/tutorial.rst def generate_local_cert(private_key, days_valid=3650, output_file='server.crt', loc_name=None, org_name=None): - def_name = u'SickGear' + def_name = 'SickGear' # Various details about who we are. For a self-signed certificate the # subject and issuer are always the same. @@ -88,7 +88,7 @@ def generate_local_cert(private_key, days_valid=3650, output_file='server.crt', # build Subject Alternate Names (aka SAN) list # First the host names, add with x509.DNSName(): - san_list = [x509.DNSName(u'localhost')] + san_list = [x509.DNSName('localhost')] try: thishostname = text_type(socket.gethostname()) san_list.append(x509.DNSName(thishostname)) @@ -100,13 +100,13 @@ def generate_local_cert(private_key, days_valid=3650, output_file='server.crt', try: # noinspection PyCompatibility from ipaddress import IPv4Address, IPv6Address - san_list.append(x509.IPAddress(IPv4Address(u'127.0.0.1'))) - san_list.append(x509.IPAddress(IPv6Address(u'::1'))) + san_list.append(x509.IPAddress(IPv4Address('127.0.0.1'))) + san_list.append(x509.IPAddress(IPv6Address('::1'))) # append local v4 ip mylocalipv4 = localipv4() if mylocalipv4: - san_list.append(x509.IPAddress(IPv4Address(u'' + mylocalipv4))) + san_list.append(x509.IPAddress(IPv4Address('' + mylocalipv4))) except (ImportError, Exception): pass diff --git a/lib/plex/plex.py b/lib/plex/plex.py index 18996ce7..e024b03e 100644 --- a/lib/plex/plex.py +++ b/lib/plex/plex.py @@ -96,7 +96,7 @@ class Plex(object): if self.use_logger: msg = 'Plex:: ' + msg if debug: - logger.log(msg, logger.DEBUG) + logger.debug(msg) else: logger.log(msg) # else: diff --git a/lib/sg_helpers.py b/lib/sg_helpers.py index 41f99ef9..4dc99d1c 100644 --- a/lib/sg_helpers.py +++ b/lib/sg_helpers.py @@ -660,7 +660,7 @@ def clean_data(data): if isinstance(data, dict): return {k: clean_data(v) for k, v in iteritems(data)} if isinstance(data, string_types): - return unicodedata.normalize('NFKD', html_unescape(data).strip().replace(u'&', u'&')) + return unicodedata.normalize('NFKD', html_unescape(data).strip().replace('&', '&')) return data @@ -938,8 +938,8 @@ def get_url(url, # type: AnyStr else: http_err_text = 'Custom HTTP error code' if 'mute_http_error' not in mute: - logger.debug(u'Response not ok. %s: %s from requested url %s' - % (response.status_code, http_err_text, url)) + logger.debug(f'Response not ok. 
{response.status_code}: {http_err_text} from requested url' + f' {url}') except requests.exceptions.HTTPError as e: raised = e @@ -948,29 +948,29 @@ def get_url(url, # type: AnyStr not (exclude_client_http_codes and is_client_error): connection_fail_params = dict(fail_type=ConnectionFailTypes.http, code=e.response.status_code) if not raise_status_code: - logger.warning(u'HTTP error %s while loading URL%s' % (e.errno, _maybe_request_url(e))) + logger.warning(f'HTTP error {e.errno} while loading URL{_maybe_request_url(e)}') except requests.exceptions.ConnectionError as e: raised = e if 'mute_connect_err' not in mute: - logger.warning(u'Connection error msg:%s while loading URL%s' % (ex(e), _maybe_request_url(e))) + logger.warning(f"Connection error msg:{ex(e)} while loading URL{_maybe_request_url(e)}") if failure_monitor: connection_fail_params = dict(fail_type=ConnectionFailTypes.connection) except requests.exceptions.ReadTimeout as e: raised = e if 'mute_read_timeout' not in mute: - logger.warning(u'Read timed out msg:%s while loading URL%s' % (ex(e), _maybe_request_url(e))) + logger.warning(f'Read timed out msg:{ex(e)} while loading URL{_maybe_request_url(e)}') if failure_monitor: connection_fail_params = dict(fail_type=ConnectionFailTypes.timeout) except (requests.exceptions.Timeout, socket.timeout) as e: raised = e if 'mute_connect_timeout' not in mute: - logger.warning(u'Connection timed out msg:%s while loading URL %s' % (ex(e), _maybe_request_url(e, url))) + logger.warning(f'Connection timed out msg:{ex(e)} while loading URL {_maybe_request_url(e, url)}') if failure_monitor: connection_fail_params = dict(fail_type=ConnectionFailTypes.connection_timeout) except (BaseException, Exception) as e: raised = e - logger.warning((u'Exception caught while loading URL {0}\r\nDetail... %s\r\n{1}' % ex(e), - u'Unknown exception while loading URL {0}\r\nDetail... {1}')[not ex(e)] + logger.warning(('Exception caught while loading URL {0}\r\nDetail... %s\r\n{1}' % ex(e), + 'Unknown exception while loading URL {0}\r\nDetail... {1}')[not ex(e)] .format(url, traceback.format_exc())) if failure_monitor: connection_fail_params = dict(fail_type=ConnectionFailTypes.other) @@ -1009,8 +1009,8 @@ def get_url(url, # type: AnyStr result = result, session except (TypeError, Exception) as e: raised = e - logger.warning(u'%s data issue from URL %s\r\nDetail... %s' % ( - ('Proxy browser', 'JSON')[parse_json], url, ex(e))) + logger.warning(f'{("Proxy browser", "JSON")[parse_json]} data issue from URL {url}\r\n' + f'Detail... 
{ex(e)}') elif savename: try: @@ -1135,15 +1135,15 @@ def fix_set_group_id(child_path): user_id = os.geteuid() # only available on UNIX if 0 != user_id and user_id != child_path_owner: - logger.debug(u'Not running as root or owner of %s, not trying to set the set-group-id' % child_path) + logger.debug(f'Not running as root or owner of {child_path}, not trying to set the set-group-id') return try: os.chown(child_path, -1, parent_gid) # only available on UNIX - logger.debug(u'Respecting the set-group-ID bit on the parent directory for %s' % child_path) + logger.debug(f'Respecting the set-group-ID bit on the parent directory for {child_path}') except OSError: - logger.error(u'Failed to respect the set-group-id bit on the parent directory for %s (setting group id %i)' - % (child_path, parent_gid)) + logger.error(f'Failed to respect the set-group-id bit on the parent directory for {child_path}' + f' (setting group id {parent_gid:d})') def remove_file_perm(filepath, log_err=True): @@ -1203,9 +1203,9 @@ def remove_file(filepath, tree=False, prefix_failure='', log_level=logging.INFO) os.remove(filepath) except OSError as e: if getattr(e, 'winerror', 0) not in (5, 32): # 5=access denied (e.g. av), 32=another process has lock - logger.log(level=log_level, msg=u'%sUnable to %s %s %s: %s' % - (prefix_failure, ('delete', 'trash')[TRASH_REMOVE_SHOW], - ('file', 'dir')[tree], filepath, ex(e))) + logger.log(level=log_level, + msg=f'{prefix_failure}Unable to {("delete", "trash")[TRASH_REMOVE_SHOW]}' + f' {("file", "dir")[tree]} {filepath}: {ex(e)}') break time.sleep(t) if not os.path.exists(filepath): @@ -1258,10 +1258,10 @@ def make_path(name, syno=False): # Windows, create all missing folders if os.name in ('nt', 'ce'): try: - logger.debug(u'Path %s doesn\'t exist, creating it' % name) + logger.debug(f"Path {name} doesn't exist, creating it") os.makedirs(name) except (OSError, IOError) as e: - logger.error(u'Failed creating %s : %s' % (name, ex(e))) + logger.error(f'Failed creating {name} : {ex(e)}') return False # not Windows, create all missing folders and set permissions @@ -1278,7 +1278,7 @@ def make_path(name, syno=False): continue try: - logger.debug(u'Path %s doesn\'t exist, creating it' % sofar) + logger.debug(f"Path {sofar} doesn't exist, creating it") os.mkdir(sofar) # use normpath to remove end separator, otherwise checks permissions against itself chmod_as_parent(os.path.normpath(sofar)) @@ -1286,7 +1286,7 @@ def make_path(name, syno=False): # do the library update for synoindex NOTIFIERS.NotifierFactory().get('SYNOINDEX').addFolder(sofar) except (OSError, IOError) as e: - logger.error(u'Failed creating %s : %s' % (sofar, ex(e))) + logger.error(f'Failed creating {sofar} : {ex(e)}') return False return True @@ -1306,7 +1306,7 @@ def chmod_as_parent(child_path): parent_path = os.path.dirname(child_path) if not parent_path: - logger.debug(u'No parent path provided in %s, unable to get permissions from it' % child_path) + logger.debug(f'No parent path provided in {child_path}, unable to get permissions from it') return parent_path_stat = os.stat(parent_path) @@ -1327,15 +1327,14 @@ def chmod_as_parent(child_path): user_id = os.geteuid() # only available on UNIX if 0 != user_id and user_id != child_path_owner: - logger.debug(u'Not running as root or owner of %s, not trying to set permissions' % child_path) + logger.debug(f'Not running as root or owner of {child_path}, not trying to set permissions') return try: os.chmod(child_path, child_mode) - logger.debug(u'Setting permissions for %s to %o as 
parent directory has %o' - % (child_path, child_mode, parent_mode)) + logger.debug(f'Setting permissions for {child_path} to {child_mode:o} as parent directory has {parent_mode:o}') except OSError: - logger.error(u'Failed to set permission for %s to %o' % (child_path, child_mode)) + logger.error(f'Failed to set permission for {child_path} to {child_mode:o}') def file_bit_filter(mode): diff --git a/sickgear.py b/sickgear.py index 4ccb25de..3cc70492 100755 --- a/sickgear.py +++ b/sickgear.py @@ -190,7 +190,7 @@ class SickGear(object): rc.load_msg = load_msg rc.run(max_v) else: - print(u'ERROR: Could not download Rollback Module.') + print('ERROR: Could not download Rollback Module.') except (BaseException, Exception): pass @@ -290,13 +290,13 @@ class SickGear(object): if self.run_as_daemon: pid_dir = os.path.dirname(self.pid_file) if not os.access(pid_dir, os.F_OK): - sys.exit(u"PID dir: %s doesn't exist. Exiting." % pid_dir) + sys.exit(f"PID dir: {pid_dir} doesn't exist. Exiting.") if not os.access(pid_dir, os.W_OK): - sys.exit(u'PID dir: %s must be writable (write permissions). Exiting.' % pid_dir) + sys.exit(f'PID dir: {pid_dir} must be writable (write permissions). Exiting.') else: if self.console_logging: - print(u'Not running in daemon mode. PID file creation disabled') + print('Not running in daemon mode. PID file creation disabled') self.create_pid = False @@ -309,27 +309,27 @@ class SickGear(object): try: os.makedirs(sickgear.DATA_DIR, 0o744) except os.error: - sys.exit(u'Unable to create data directory: %s Exiting.' % sickgear.DATA_DIR) + sys.exit(f'Unable to create data directory: {sickgear.DATA_DIR} Exiting.') # Make sure we can write to the data dir if not os.access(sickgear.DATA_DIR, os.W_OK): - sys.exit(u'Data directory: %s must be writable (write permissions). Exiting.' % sickgear.DATA_DIR) + sys.exit(f'Data directory: {sickgear.DATA_DIR} must be writable (write permissions). Exiting.') # Make sure we can write to the config file if not os.access(sickgear.CONFIG_FILE, os.W_OK): if os.path.isfile(sickgear.CONFIG_FILE): - sys.exit(u'Config file: %s must be writeable (write permissions). Exiting.' % sickgear.CONFIG_FILE) + sys.exit(f'Config file: {sickgear.CONFIG_FILE} must be writeable (write permissions). Exiting.') elif not os.access(os.path.dirname(sickgear.CONFIG_FILE), os.W_OK): - sys.exit(u'Config file directory: %s must be writeable (write permissions). Exiting' - % os.path.dirname(sickgear.CONFIG_FILE)) + sys.exit(f'Config file directory: {os.path.dirname(sickgear.CONFIG_FILE)}' + f' must be writeable (write permissions). Exiting') os.chdir(sickgear.DATA_DIR) if self.console_logging: - print(u'Starting up SickGear from %s' % sickgear.CONFIG_FILE) + print(f'Starting up SickGear from {sickgear.CONFIG_FILE}') # Load the config and publish it to the sickgear package if not os.path.isfile(sickgear.CONFIG_FILE): - print(u'Unable to find "%s", all settings will be default!' 
% sickgear.CONFIG_FILE) + print(f'Unable to find "{sickgear.CONFIG_FILE}", all settings will be default!') sickgear.CFG = ConfigObj(sickgear.CONFIG_FILE) try: @@ -353,7 +353,7 @@ class SickGear(object): sickgear.initialize(console_logging=self.console_logging) if self.forced_port: - logger.log(u'Forcing web server to port %s' % self.forced_port) + logger.log(f'Forcing web server to port {self.forced_port}') self.start_port = self.forced_port else: self.start_port = sickgear.WEB_PORT @@ -403,12 +403,11 @@ class SickGear(object): self.webserver.wait_server_start() sickgear.started = True except (BaseException, Exception): - logger.log(u'Unable to start web server, is something else running on port %d?' % self.start_port, - logger.ERROR) + logger.error(f'Unable to start web server, is something else running on port {self.start_port:d}?') if self.run_as_systemd: self.exit(0) if sickgear.LAUNCH_BROWSER and not self.no_launch: - logger.log(u'Launching browser and exiting', logger.ERROR) + logger.error('Launching browser and exiting') sickgear.launch_browser(self.start_port) self.exit(1) @@ -439,11 +438,11 @@ class SickGear(object): self.execute_rollback(mo, max_v, load_msg) cur_db_version = db.DBConnection(d).check_db_version() if 100000 <= cur_db_version: - print(u'Rollback to production failed.') - sys.exit(u'If you have used other forks, your database may be unusable due to their changes') + print('Rollback to production failed.') + sys.exit('If you have used other forks, your database may be unusable due to their changes') if 100000 <= max_v and None is not base_v: max_v = base_v # set max_v to the needed base production db for test_db - print(u'Rollback to production of [%s] successful.' % d) + print(f'Rollback to production of [{d}] successful.') sickgear.classes.loading_msg.set_msg_progress(load_msg, 'Finished') # handling of production version higher than current base of test db @@ -454,30 +453,29 @@ class SickGear(object): self.execute_rollback(mo, base_v, load_msg) cur_db_version = db.DBConnection(d).check_db_version() if 100000 <= cur_db_version: - print(u'Rollback to production base failed.') - sys.exit(u'If you have used other forks, your database may be unusable due to their changes') + print('Rollback to production base failed.') + sys.exit('If you have used other forks, your database may be unusable due to their changes') if 100000 <= max_v and None is not base_v: max_v = base_v # set max_v to the needed base production db for test_db - print(u'Rollback to production base of [%s] successful.' % d) + print(f'Rollback to production base of [{d}] successful.') sickgear.classes.loading_msg.set_msg_progress(load_msg, 'Finished') # handling of production db versions if 0 < cur_db_version < 100000: if cur_db_version < min_v: - print(u'Your [%s] database version (%s) is too old to migrate from with this version of SickGear' - % (d, cur_db_version)) - sys.exit(u'Upgrade using a previous version of SG first,' - + u' or start with no database file to begin fresh') + print(f'Your [{d}] database version ({cur_db_version})' + f' is too old to migrate from with this version of SickGear') + sys.exit('Upgrade using a previous version of SG first,' + ' or start with no database file to begin fresh') if cur_db_version > max_v: sickgear.classes.loading_msg.set_msg_progress(load_msg, 'Rollback') - print(u'Your [%s] database version (%s) has been incremented past' - u' what this version of SickGear supports. Trying to rollback now. Please wait...' 
% - (d, cur_db_version)) + print(f'Your [{d}] database version ({cur_db_version}) has been incremented past what this' + f' version of SickGear supports. Trying to rollback now. Please wait...') self.execute_rollback(mo, max_v, load_msg) if db.DBConnection(d).check_db_version() > max_v: - print(u'Rollback failed.') - sys.exit(u'If you have used other forks, your database may be unusable due to their changes') - print(u'Rollback of [%s] successful.' % d) + print('Rollback failed.') + sys.exit('If you have used other forks, your database may be unusable due to their changes') + print(f'Rollback of [{d}] successful.') sickgear.classes.loading_msg.set_msg_progress(load_msg, 'Finished') # migrate the config if it needs it @@ -501,9 +499,9 @@ class SickGear(object): if os.path.exists(restore_dir): sickgear.classes.loading_msg.message = 'Restoring files' if self.restore(restore_dir, sickgear.DATA_DIR): - logger.log(u'Restore successful...') + logger.log('Restore successful...') else: - logger.log_error_and_exit(u'Restore FAILED!') + logger.log_error_and_exit('Restore FAILED!') # refresh network timezones sickgear.classes.loading_msg.message = 'Checking network timezones' @@ -669,7 +667,7 @@ class SickGear(object): # Write pid if self.create_pid: pid = str(os.getpid()) - logger.log(u'Writing PID: %s to %s' % (pid, self.pid_file)) + logger.log(f'Writing PID: {pid} to {self.pid_file}') try: os.fdopen(os.open(self.pid_file, os.O_CREAT | os.O_WRONLY, 0o644), 'w').write('%s\n' % pid) except (BaseException, Exception) as er: @@ -705,7 +703,7 @@ class SickGear(object): Populates the showList with shows from the database """ - logger.log(u'Loading initial show list') + logger.log('Loading initial show list') my_db = db.DBConnection(row_type='dict') sql_result = my_db.select( @@ -749,8 +747,7 @@ class SickGear(object): sickgear.showDict[show_obj.sid_int] = show_obj _ = show_obj.ids except (BaseException, Exception) as err: - logger.log('There was an error creating the show in %s: %s' % ( - cur_result['location'], ex(err)), logger.ERROR) + logger.error('There was an error creating the show in %s: %s' % (cur_result['location'], ex(err))) sickgear.webserve.Home.make_showlist_unique_names() @staticmethod @@ -801,13 +798,13 @@ class SickGear(object): popen_list += sickgear.MY_ARGS if self.run_as_systemd: - logger.log(u'Restarting SickGear with exit(1) handler and %s' % popen_list) + logger.log(f'Restarting SickGear with exit(1) handler and {popen_list}') logger.close() self.exit(1) if '--nolaunch' not in popen_list: popen_list += ['--nolaunch'] - logger.log(u'Restarting SickGear with %s' % popen_list) + logger.log(f'Restarting SickGear with {popen_list}') logger.close() from _23 import Popen with Popen(popen_list, cwd=os.getcwd()): diff --git a/sickgear/__init__.py b/sickgear/__init__.py index 53d7e3b7..305952a2 100644 --- a/sickgear/__init__.py +++ b/sickgear/__init__.py @@ -803,7 +803,7 @@ def init_stage_1(console_logging): CACHE_DIR = ACTUAL_CACHE_DIR if not helpers.make_dir(CACHE_DIR): - logger.log(u'!!! Creating local cache dir failed, using system default', logger.ERROR) + logger.error('!!! creating local cache dir failed, using system default') CACHE_DIR = None # clean cache folders @@ -811,7 +811,7 @@ def init_stage_1(console_logging): helpers.clear_cache() ZONEINFO_DIR = os.path.join(CACHE_DIR, 'zoneinfo') if not os.path.isdir(ZONEINFO_DIR) and not helpers.make_path(ZONEINFO_DIR): - logger.log(u'!!! Creating local zoneinfo dir failed', logger.ERROR) + logger.error('!!! 
creating local zoneinfo dir failed') sg_helpers.CACHE_DIR = CACHE_DIR sg_helpers.DATA_DIR = DATA_DIR @@ -830,7 +830,7 @@ def init_stage_1(console_logging): TRIM_ZERO = bool(check_setting_int(CFG, 'GUI', 'trim_zero', 0)) DATE_PRESET = check_setting_str(CFG, 'GUI', 'date_preset', '%x') TIME_PRESET_W_SECONDS = check_setting_str(CFG, 'GUI', 'time_preset', '%I:%M:%S %p') - TIME_PRESET = TIME_PRESET_W_SECONDS.replace(u':%S', u'') + TIME_PRESET = TIME_PRESET_W_SECONDS.replace(':%S', '') TIMEZONE_DISPLAY = check_setting_str(CFG, 'GUI', 'timezone_display', 'network') SHOW_TAGS = check_setting_str(CFG, 'GUI', 'show_tags', 'Show List').split(',') SHOW_TAG_DEFAULT = check_setting_str(CFG, 'GUI', 'show_tag_default', @@ -842,7 +842,7 @@ def init_stage_1(console_logging): LOG_DIR = os.path.normpath(os.path.join(DATA_DIR, ACTUAL_LOG_DIR)) if not helpers.make_dir(LOG_DIR): - logger.log(u'!!! No log folder, logging to screen only!', logger.ERROR) + logger.error('!!! no log folder, logging to screen only!') FILE_LOGGING_PRESET = check_setting_str(CFG, 'General', 'file_logging_preset', 'DEBUG') if bool(check_setting_int(CFG, 'General', 'file_logging_db', 0)): @@ -1488,7 +1488,7 @@ def init_stage_1(console_logging): ('docker/other', 'snap')['snap' in CUR_COMMIT_HASH] if not os.path.isfile(CONFIG_FILE): - logger.log(u'Unable to find \'%s\', all settings will be default!' % CONFIG_FILE, logger.DEBUG) + logger.debug(f'Unable to find \'{CONFIG_FILE}\', all settings will be default!') update_config = True # Get expected config version @@ -1747,20 +1747,20 @@ def restart(soft=True, update_pkg=None): if update_pkg: MY_ARGS.append('--update-pkg') - logger.log(u'Trigger event restart') + logger.log('Trigger event restart') events.put(events.SystemEvent.RESTART) else: halt() save_all() - logger.log(u'Re-initializing all data') + logger.log('Re-initializing all data') initialize() def sig_handler(signum=None, _=None): is_ctrlbreak = 'win32' == sys.platform and signal.SIGBREAK == signum - msg = u'Signal "%s" found' % (signal.SIGINT == signum and 'CTRL-C' or is_ctrlbreak and 'CTRL+BREAK' or - signal.SIGTERM == signum and 'Termination' or signum) + msg = 'Signal "%s" found' % (signal.SIGINT == signum and 'CTRL-C' or is_ctrlbreak and 'CTRL+BREAK' or + signal.SIGTERM == signum and 'Termination' or signum) if None is signum or signum in (signal.SIGINT, signal.SIGTERM) or is_ctrlbreak: logger.log('%s, saving and exiting...' 
% msg) events.put(events.SystemEvent.SHUTDOWN) @@ -1831,12 +1831,12 @@ def save_all(): global showList # write all shows - logger.log(u'Saving all shows to the database') + logger.log('Saving all shows to the database') for show_obj in showList: # type: tv.TVShow show_obj.save_to_db() # save config - logger.log(u'Saving config file to disk') + logger.log('Saving config file to disk') save_config() @@ -2400,4 +2400,4 @@ def launch_browser(start_port=None): try: webbrowser.open(browser_url, 1, True) except (BaseException, Exception): - logger.log('Unable to launch a browser', logger.ERROR) + logger.error('Unable to launch a browser') diff --git a/sickgear/anime.py b/sickgear/anime.py index 47eeb48d..b5f0938f 100644 --- a/sickgear/anime.py +++ b/sickgear/anime.py @@ -52,7 +52,7 @@ class AniGroupList(object): self.load() def load(self): - logger.log(u'Building allow amd block list for %s' % self.tvid_prodid, logger.DEBUG) + logger.debug(f'Building allow amd block list for {self.tvid_prodid}') self.allowlist = self._load_list('allowlist') self.blocklist = self._load_list('blocklist') @@ -74,8 +74,7 @@ class AniGroupList(object): for cur_result in sql_result: groups.append(cur_result['keyword']) - logger.log('AniPermsList: %s loaded keywords from %s: %s' % (self.tvid_prodid, table, groups), - logger.DEBUG) + logger.debug('AniPermsList: %s loaded keywords from %s: %s' % (self.tvid_prodid, table, groups)) return groups @@ -88,7 +87,7 @@ class AniGroupList(object): self._del_all_keywords('allowlist') self._add_keywords('allowlist', values) self.allowlist = values - logger.log('Allowlist set to: %s' % self.allowlist, logger.DEBUG) + logger.debug('Allowlist set to: %s' % self.allowlist) def set_block_keywords(self, values): # type: (List[AnyStr]) -> None @@ -99,7 +98,7 @@ class AniGroupList(object): self._del_all_keywords('blocklist') self._add_keywords('blocklist', values) self.blocklist = values - logger.log('Blocklist set to: %s' % self.blocklist, logger.DEBUG) + logger.debug('Blocklist set to: %s' % self.blocklist) def _del_all_keywords(self, table): # type: (AnyStr) -> None @@ -133,15 +132,14 @@ class AniGroupList(object): :return: True or False """ if not result.release_group: - logger.log('Failed to detect release group, invalid result', logger.DEBUG) + logger.debug('Failed to detect release group, invalid result') return False allowed = result.release_group.lower() in [x.lower() for x in self.allowlist] or not self.allowlist blocked = result.release_group.lower() in [x.lower() for x in self.blocklist] - logger.log('Result %sallowed%s in block list. Parsed group name: "%s" from result "%s"' % - (('not ', '')[allowed], (', but', ' and not')[not blocked], result.release_group, result.name), - logger.DEBUG) + logger.debug(f'Result {("not ", "")[allowed]}allowed{(", but", " and not")[not blocked]} in block list.' + f' Parsed group name: "{result.release_group}" from result "{result.name}"') return allowed and not blocked @@ -193,29 +191,29 @@ def create_anidb_obj(**kwargs): def set_up_anidb_connection(): if not sickgear.USE_ANIDB: - logger.log(u'Usage of anidb disabled. Skipping', logger.DEBUG) + logger.debug('Usage of anidb disabled. Skipping') return False if not sickgear.ANIDB_USERNAME and not sickgear.ANIDB_PASSWORD: - logger.log(u'anidb username and/or password are not set. Aborting anidb lookup.', logger.DEBUG) + logger.debug('anidb username and/or password are not set. 
Aborting anidb lookup.') return False if not sickgear.ADBA_CONNECTION: - # anidb_logger = (lambda x: logger.log('ANIDB: ' + str(x)), logger.DEBUG) + # anidb_logger = (lambda x: logger.debug('ANIDB: ' + str(x))) sickgear.ADBA_CONNECTION = adba.Connection(keepAlive=True) # , log=anidb_logger) auth = False try: auth = sickgear.ADBA_CONNECTION.authed() except (BaseException, Exception) as e: - logger.log(u'exception msg: ' + ex(e)) + logger.log(f'exception msg: {ex(e)}') pass if not auth: try: sickgear.ADBA_CONNECTION.auth(sickgear.ANIDB_USERNAME, sickgear.ANIDB_PASSWORD) except (BaseException, Exception) as e: - logger.log(u'exception msg: ' + ex(e)) + logger.log(f'exception msg: {ex(e)}') return False else: return True @@ -230,7 +228,7 @@ def pull_anidb_groups(show_name): anime = create_anidb_obj(name=show_name) return anime.get_groups() except (BaseException, Exception) as e: - logger.log(u'Anidb exception: %s' % ex(e), logger.DEBUG) + logger.debug(f'Anidb exception: {ex(e)}') return False @@ -258,7 +256,7 @@ def push_anidb_mylist(filepath, anidb_episode): log = ('Adding the file to the anidb mylist', logger.DEBUG) result = True except (BaseException, Exception) as e: - log = (u'exception msg: %s' % ex(e), logger.MESSAGE) + log = (f'exception msg: {ex(e)}', logger.MESSAGE) result = False return result, log diff --git a/sickgear/auto_post_processer.py b/sickgear/auto_post_processer.py index dfa97031..124e8b4a 100644 --- a/sickgear/auto_post_processer.py +++ b/sickgear/auto_post_processer.py @@ -38,13 +38,12 @@ class PostProcesser(object): def _main(): if not os.path.isdir(sickgear.TV_DOWNLOAD_DIR): - logger.log(u"Automatic post-processing attempted but dir %s doesn't exist" % sickgear.TV_DOWNLOAD_DIR, - logger.ERROR) + logger.error('Automatic post-processing attempted but dir %s doesn\'t exist' % sickgear.TV_DOWNLOAD_DIR) return if not os.path.isabs(sickgear.TV_DOWNLOAD_DIR): - logger.log(u'Automatic post-processing attempted but dir %s is relative ' - '(and probably not what you really want to process)' % sickgear.TV_DOWNLOAD_DIR, logger.ERROR) + logger.error('Automatic post-processing attempted but dir %s is relative ' + '(and probably not what you really want to process)' % sickgear.TV_DOWNLOAD_DIR) return processTV.processDir(sickgear.TV_DOWNLOAD_DIR, is_basedir=True) diff --git a/sickgear/browser.py b/sickgear/browser.py index aa0e8589..6a902537 100644 --- a/sickgear/browser.py +++ b/sickgear/browser.py @@ -78,7 +78,7 @@ def folders_at_path(path, include_parent=False, include_files=False): try: file_list = get_file_list(path, include_files) except OSError as e: - logger.log('Unable to open %s: %r / %s' % (path, e, ex(e)), logger.WARNING) + logger.warning('Unable to open %s: %r / %s' % (path, e, ex(e))) file_list = get_file_list(parent_path, include_files) file_list = sorted(file_list, key=lambda x: os.path.basename(x['name']).lower()) diff --git a/sickgear/clients/deluge.py b/sickgear/clients/deluge.py index 411315a8..b137f66d 100644 --- a/sickgear/clients/deluge.py +++ b/sickgear/clients/deluge.py @@ -52,7 +52,7 @@ class DelugeAPI(GenericClient): if not connected: hosts = self._post_json({'method': 'web.get_hosts', 'params': [], 'id': 11}) if 0 == len(hosts): - logger.log('%s: WebUI does not contain daemons' % self.name, logger.ERROR) + logger.error('%s: WebUI does not contain daemons' % self.name) return None self._post_json({'method': 'web.connect', 'params': [hosts[0][0]], 'id': 11}, False) @@ -60,7 +60,7 @@ class DelugeAPI(GenericClient): connected = self._post_json({'method': 
'web.connected', 'params': [], 'id': 10}) if not connected: - logger.log('%s: WebUI could not connect to daemon' % self.name, logger.ERROR) + logger.error('%s: WebUI could not connect to daemon' % self.name) return None except RequestException: return None @@ -94,7 +94,7 @@ class DelugeAPI(GenericClient): label = sickgear.TORRENT_LABEL if ' ' in label: - logger.log('%s: Invalid label. Label must not contain a space' % self.name, logger.ERROR) + logger.error('%s: Invalid label. Label must not contain a space' % self.name) return False if label: @@ -106,22 +106,21 @@ class DelugeAPI(GenericClient): if None is not labels: if label not in labels: - logger.log('%s: %s label does not exist in Deluge we must add it' % (self.name, label), - logger.DEBUG) + logger.debug('%s: %s label does not exist in Deluge we must add it' % (self.name, label)) self._request_json({ 'method': 'label.add', 'params': [label], 'id': 4}) - logger.log('%s: %s label added to Deluge' % (self.name, label), logger.DEBUG) + logger.debug('%s: %s label added to Deluge' % (self.name, label)) # add label to torrent self._request_json({ 'method': 'label.set_torrent', 'params': [result.hash, label], 'id': 5}) - logger.log('%s: %s label added to torrent' % (self.name, label), logger.DEBUG) + logger.debug('%s: %s label added to torrent' % (self.name, label)) else: - logger.log('%s: label plugin not detected' % self.name, logger.DEBUG) + logger.debug('%s: label plugin not detected' % self.name) return False return True diff --git a/sickgear/clients/download_station.py b/sickgear/clients/download_station.py index 03f95f15..2752ab48 100644 --- a/sickgear/clients/download_station.py +++ b/sickgear/clients/download_station.py @@ -71,7 +71,7 @@ class DownloadStationAPI(GenericClient): # type: (AnyStr) -> None out = '%s%s: %s' % (self.name, (' replied with', '')['Could not' in msg], msg) self._errmsg = '
%s.' % out - logger.log(out, logger.ERROR) + logger.error(out) def _error_task(self, response): @@ -234,7 +234,7 @@ class DownloadStationAPI(GenericClient): i = 0 while retry_ids: for i in tries: - logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG) + logger.debug('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i)) time.sleep(i) item['fail'] = [] for task in filter(filter_func, self._tinf(retry_ids, err=True)): @@ -246,8 +246,8 @@ class DownloadStationAPI(GenericClient): retry_ids = item['fail'] else: if max(tries) == i: - logger.log('%s: failed to %s %s item(s) after %s tries over %s mins, aborted' % - (self.name, act, len(item['fail']), len(tries), sum(tries) / 60), logger.DEBUG) + logger.debug('%s: failed to %s %s item(s) after %s tries over %s mins, aborted' % + (self.name, act, len(item['fail']), len(tries), sum(tries) / 60)) return (item['fail'] + item['ignore']) or True @@ -261,8 +261,8 @@ class DownloadStationAPI(GenericClient): if 3 <= self._task_version: return self._add_torrent(uri={'uri': search_result.url}) - logger.log('%s: the API at %s doesn\'t support torrent magnet, download skipped' % - (self.name, self.host), logger.WARNING) + logger.warning('%s: the API at %s doesn\'t support torrent magnet, download skipped' % + (self.name, self.host)) def _add_torrent_file(self, search_result): # type: (TorrentSearchResult) -> Union[AnyStr, bool] diff --git a/sickgear/clients/generic.py b/sickgear/clients/generic.py index 143903e9..4ecc5084 100644 --- a/sickgear/clients/generic.py +++ b/sickgear/clients/generic.py @@ -51,7 +51,7 @@ class GenericClient(object): seg = seg[0:c - (len(sample) - 2)] + sample output += ['%s: request %s= %s%s%s' % (self.name, arg, ('', '..')[bool(i)], seg, ('', '..')[i != nch])] - logger.log(output, logger.DEBUG) + logger.debug(output) def _request(self, method='get', params=None, data=None, files=None, **kwargs): @@ -61,7 +61,7 @@ class GenericClient(object): self.last_time = time.time() if not self._get_auth(): - logger.log('%s: Authentication failed' % self.name, logger.ERROR) + logger.error('%s: Authentication failed' % self.name) return False # self._log_request_details(method, params, data, files, **kwargs) @@ -70,31 +70,30 @@ class GenericClient(object): response = self.session.__getattribute__(method)(self.url, params=params, data=data, files=files, timeout=kwargs.pop('timeout', 120), verify=False, **kwargs) except requests.exceptions.ConnectionError as e: - logger.log('%s: Unable to connect %s' % (self.name, ex(e)), logger.ERROR) + logger.error('%s: Unable to connect %s' % (self.name, ex(e))) return False except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL): - logger.log('%s: Invalid host' % self.name, logger.ERROR) + logger.error('%s: Invalid host' % self.name) return False except requests.exceptions.HTTPError as e: - logger.log('%s: Invalid HTTP request %s' % (self.name, ex(e)), logger.ERROR) + logger.error('%s: Invalid HTTP request %s' % (self.name, ex(e))) return False except requests.exceptions.Timeout as e: - logger.log('%s: Connection timeout %s' % (self.name, ex(e)), logger.ERROR) + logger.error('%s: Connection timeout %s' % (self.name, ex(e))) return False except (BaseException, Exception) as e: - logger.log('%s: Unknown exception raised when sending torrent to %s: %s' % (self.name, self.name, ex(e)), - logger.ERROR) + logger.error('%s: Unknown exception raised when sending torrent to %s: %s' % (self.name, self.name, ex(e))) return False if 401 == 
response.status_code: - logger.log('%s: Invalid username or password, check your config' % self.name, logger.ERROR) + logger.error('%s: Invalid username or password, check your config' % self.name) return False if response.status_code in http_error_code: - logger.log('%s: %s' % (self.name, http_error_code[response.status_code]), logger.DEBUG) + logger.debug('%s: %s' % (self.name, http_error_code[response.status_code])) return False - logger.log('%s: Response to %s request is %s' % (self.name, method.upper(), response.text), logger.DEBUG) + logger.debug('%s: Response to %s request is %s' % (self.name, method.upper(), response.text)) return response @@ -213,10 +212,10 @@ class GenericClient(object): r_code = False - logger.log('Calling %s client' % self.name, logger.DEBUG) + logger.debug('Calling %s client' % self.name) if not self._get_auth(): - logger.log('%s: Authentication failed' % self.name, logger.ERROR) + logger.error('%s: Authentication failed' % self.name) return r_code try: @@ -225,8 +224,8 @@ class GenericClient(object): result = self._get_torrent_hash(result) except (BaseException, Exception) as e: - logger.log('Bad torrent data: hash is %s for [%s]' % (result.hash, result.name), logger.ERROR) - logger.log('Exception raised when checking torrent data: %s' % (ex(e)), logger.DEBUG) + logger.error('Bad torrent data: hash is %s for [%s]' % (result.hash, result.name)) + logger.debug('Exception raised when checking torrent data: %s' % (ex(e))) return r_code try: @@ -237,30 +236,30 @@ class GenericClient(object): self.created_id = isinstance(r_code, string_types) and r_code or None if not r_code: - logger.log('%s: Unable to send torrent to client' % self.name, logger.ERROR) + logger.error('%s: Unable to send torrent to client' % self.name) return False if not self._set_torrent_pause(result): - logger.log('%s: Unable to set the pause for torrent' % self.name, logger.ERROR) + logger.error('%s: Unable to set the pause for torrent' % self.name) if not self._set_torrent_label(result): - logger.log('%s: Unable to set the label for torrent' % self.name, logger.ERROR) + logger.error('%s: Unable to set the label for torrent' % self.name) if not self._set_torrent_ratio(result): - logger.log('%s: Unable to set the ratio for torrent' % self.name, logger.ERROR) + logger.error('%s: Unable to set the ratio for torrent' % self.name) if not self._set_torrent_seed_time(result): - logger.log('%s: Unable to set the seed time for torrent' % self.name, logger.ERROR) + logger.error('%s: Unable to set the seed time for torrent' % self.name) if not self._set_torrent_path(result): - logger.log('%s: Unable to set the path for torrent' % self.name, logger.ERROR) + logger.error('%s: Unable to set the path for torrent' % self.name) if 0 != result.priority and not self._set_torrent_priority(result): - logger.log('%s: Unable to set priority for torrent' % self.name, logger.ERROR) + logger.error('%s: Unable to set priority for torrent' % self.name) except (BaseException, Exception) as e: - logger.log('%s: Failed sending torrent: %s - %s' % (self.name, result.name, result.hash), logger.ERROR) - logger.log('%s: Exception raised when sending torrent: %s' % (self.name, ex(e)), logger.DEBUG) + logger.error('%s: Failed sending torrent: %s - %s' % (self.name, result.name, result.hash)) + logger.debug('%s: Exception raised when sending torrent: %s' % (self.name, ex(e))) return r_code diff --git a/sickgear/clients/qbittorrent.py b/sickgear/clients/qbittorrent.py index b9711e89..6f571128 100644 --- 
a/sickgear/clients/qbittorrent.py +++ b/sickgear/clients/qbittorrent.py @@ -168,7 +168,7 @@ class QbittorrentAPI(GenericClient): task = self._tinf(t.get('hash'), use_props=False, err=True)[0] return 1 < task.get('priority') or self._ignore_state(task) # then mark fail elif isinstance(response, string_types) and 'queueing' in response.lower(): - logger.log('%s: %s' % (self.name, response), logger.ERROR) + logger.error('%s: %s' % (self.name, response)) return not mark_fail return mark_fail @@ -195,7 +195,7 @@ class QbittorrentAPI(GenericClient): task = self._tinf(t.get('hash'), use_props=False, err=True)[0] return label not in task.get('category') or self._ignore_state(task) # then mark fail elif isinstance(response, string_types) and 'incorrect' in response.lower(): - logger.log('%s: %s. "%s" isn\'t known to qB' % (self.name, response, label), logger.ERROR) + logger.error('%s: %s. "%s" isn\'t known to qB' % (self.name, response, label)) return not mark_fail return mark_fail @@ -312,7 +312,7 @@ class QbittorrentAPI(GenericClient): i = 0 while retry_ids: for i in tries: - logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG) + logger.debug('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i)) time.sleep(i) item['fail'] = [] for task in filter(filter_func, self._tinf(retry_ids, use_props=False, err=True)): @@ -324,8 +324,8 @@ class QbittorrentAPI(GenericClient): retry_ids = item['fail'] else: if max(tries) == i: - logger.log('%s: failed to %s %s item(s) after %s tries over %s mins, aborted' % - (self.name, act, len(item['fail']), len(tries), sum(tries) / 60), logger.DEBUG) + logger.debug('%s: failed to %s %s item(s) after %s tries over %s mins, aborted' % + (self.name, act, len(item['fail']), len(tries), sum(tries) / 60)) return (item['fail'] + item['ignore']) or True @@ -356,7 +356,7 @@ class QbittorrentAPI(GenericClient): :return: True if created, else Falsy if nothing created """ if self._tinf(data.hash): - logger.log('Could not create task, the hash is already in use', logger.ERROR) + logger.error('Could not create task, the hash is already in use') return label = sickgear.TORRENT_LABEL.replace(' ', '_') @@ -401,7 +401,7 @@ class QbittorrentAPI(GenericClient): authless = bool(re.search('(?i)login|version', cmd)) if authless or self.auth: if not authless and not self._get_auth(): - logger.log('%s: Authentication failed' % self.name, logger.ERROR) + logger.error('%s: Authentication failed' % self.name) return # self._log_request_details('%s%s' % (self.api_ns, cmd.strip('/')), **kwargs) @@ -431,7 +431,7 @@ class QbittorrentAPI(GenericClient): self.api_ns = 'api/v2/' response = self._client_request('auth/login', post_data=post_data, raise_status_code=True) if isinstance(response, string_types) and 'banned' in response.lower(): - logger.log('%s: %s' % (self.name, response), logger.ERROR) + logger.error('%s: %s' % (self.name, response)) response = False elif not response: self.api_ns = '' diff --git a/sickgear/clients/rtorrent.py b/sickgear/clients/rtorrent.py index 332428cd..4643fa46 100644 --- a/sickgear/clients/rtorrent.py +++ b/sickgear/clients/rtorrent.py @@ -43,7 +43,7 @@ class RtorrentAPI(GenericClient): if self.auth: try: if self.auth.has_local_id(data.hash): - logger.log('%s: Item already exists %s' % (self.name, data.name), logger.WARNING) + logger.warning('%s: Item already exists %s' % (self.name, data.name)) raise custom_var = (1, sickgear.TORRENT_LABEL_VAR or '')[0 <= sickgear.TORRENT_LABEL_VAR <= 5] @@ -62,8 +62,8 
@@ class RtorrentAPI(GenericClient): if torrent and sickgear.TORRENT_LABEL: label = torrent.get_custom(custom_var) if sickgear.TORRENT_LABEL != label: - logger.log('%s: could not change custom%s label value \'%s\' to \'%s\' for %s' % ( - self.name, custom_var, label, sickgear.TORRENT_LABEL, torrent.name), logger.WARNING) + logger.warning('%s: could not change custom%s label value \'%s\' to \'%s\' for %s' % ( + self.name, custom_var, label, sickgear.TORRENT_LABEL, torrent.name)) except (BaseException, Exception): pass diff --git a/sickgear/clients/transmission.py b/sickgear/clients/transmission.py index 8fb5810a..02a9b8da 100644 --- a/sickgear/clients/transmission.py +++ b/sickgear/clients/transmission.py @@ -86,7 +86,7 @@ class TransmissionAPI(GenericClient): # populate blanked and download_dir if not self._get_auth(): - logger.log('%s: Authentication failed' % self.name, logger.ERROR) + logger.error('%s: Authentication failed' % self.name) return False download_dir = None @@ -95,7 +95,7 @@ class TransmissionAPI(GenericClient): elif self.download_dir: download_dir = self.download_dir else: - logger.log('Path required for Transmission Downloaded files location', logger.ERROR) + logger.error('Path required for Transmission Downloaded files location') if not download_dir and not self.blankable: return False diff --git a/sickgear/common.py b/sickgear/common.py index e66b946a..ae5ecaa2 100644 --- a/sickgear/common.py +++ b/sickgear/common.py @@ -300,7 +300,7 @@ class Quality(object): if not hd_options and full_hd: return Quality.FULLHDBLURAY if sickgear.ANIME_TREAT_AS_HDTV: - logger.log(u'Treating file: %s with "unknown" quality as HDTV per user settings' % name, logger.DEBUG) + logger.debug(f'Treating file: {name} with "unknown" quality as HDTV per user settings') return Quality.HDTV return Quality.UNKNOWN @@ -371,10 +371,10 @@ class Quality(object): try: parser = createParser(filename) except InputStreamError as e: - logger.log(msg % (filename, ex(e)), logger.WARNING) + logger.warning(msg % (filename, ex(e))) except (BaseException, Exception) as e: - logger.log(msg % (filename, ex(e)), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error(msg % (filename, ex(e))) + logger.error(traceback.format_exc()) if parser: extract = None @@ -385,7 +385,7 @@ class Quality(object): parser.parse_comments = False extract = extractMetadata(parser, **args) except (BaseException, Exception) as e: - logger.log(msg % (filename, ex(e)), logger.WARNING) + logger.warning(msg % (filename, ex(e))) if extract: try: height = extract.get('height') diff --git a/sickgear/config.py b/sickgear/config.py index 9fb7aa98..c7adc21a 100644 --- a/sickgear/config.py +++ b/sickgear/config.py @@ -56,7 +56,7 @@ def change_https_cert(https_cert): if os.path.normpath(sickgear.HTTPS_CERT) != os.path.normpath(https_cert): if helpers.make_dir(os.path.dirname(os.path.abspath(https_cert))): sickgear.HTTPS_CERT = os.path.normpath(https_cert) - logger.log(u'Changed https cert path to %s' % https_cert) + logger.log(f'Changed https cert path to {https_cert}') else: return False @@ -71,7 +71,7 @@ def change_https_key(https_key): if os.path.normpath(sickgear.HTTPS_KEY) != os.path.normpath(https_key): if helpers.make_dir(os.path.dirname(os.path.abspath(https_key))): sickgear.HTTPS_KEY = os.path.normpath(https_key) - logger.log(u'Changed https key path to %s' % https_key) + logger.log(f'Changed https key path to {https_key}') else: return False @@ -89,7 +89,7 @@ def change_log_dir(log_dir, web_log): sickgear.LOG_DIR = 
abs_log_dir logger.sb_log_instance.init_logging() - logger.log(u'Initialized new log file in %s' % sickgear.LOG_DIR) + logger.log(f'Initialized new log file in {sickgear.LOG_DIR}') log_dir_changed = True else: @@ -109,7 +109,7 @@ def change_nzb_dir(nzb_dir): if os.path.normpath(sickgear.NZB_DIR) != os.path.normpath(nzb_dir): if helpers.make_dir(nzb_dir): sickgear.NZB_DIR = os.path.normpath(nzb_dir) - logger.log(u'Changed NZB folder to %s' % nzb_dir) + logger.log(f'Changed NZB folder to {nzb_dir}') else: return False @@ -124,7 +124,7 @@ def change_torrent_dir(torrent_dir): if os.path.normpath(sickgear.TORRENT_DIR) != os.path.normpath(torrent_dir): if helpers.make_dir(torrent_dir): sickgear.TORRENT_DIR = os.path.normpath(torrent_dir) - logger.log(u'Changed torrent folder to %s' % torrent_dir) + logger.log(f'Changed torrent folder to {torrent_dir}') else: return False @@ -139,7 +139,7 @@ def change_tv_download_dir(tv_download_dir): if os.path.normpath(sickgear.TV_DOWNLOAD_DIR) != os.path.normpath(tv_download_dir): if helpers.make_dir(tv_download_dir): sickgear.TV_DOWNLOAD_DIR = os.path.normpath(tv_download_dir) - logger.log(u'Changed TV download folder to %s' % tv_download_dir) + logger.log(f'Changed TV download folder to {tv_download_dir}') else: return False @@ -407,7 +407,7 @@ def check_setting_int(config, cfg_name, item_name, def_val): except (BaseException, Exception): config[cfg_name] = {} config[cfg_name][item_name] = my_val - logger.log('%s -> %s' % (item_name, my_val), logger.DEBUG) + logger.debug('%s -> %s' % (item_name, my_val)) return my_val @@ -422,7 +422,7 @@ def check_setting_float(config, cfg_name, item_name, def_val): config[cfg_name] = {} config[cfg_name][item_name] = my_val - logger.log('%s -> %s' % (item_name, my_val), logger.DEBUG) + logger.debug('%s -> %s' % (item_name, my_val)) return my_val @@ -449,9 +449,9 @@ def check_setting_str(config, cfg_name, item_name, def_val, log=True): config[cfg_name][item_name] = helpers.encrypt(my_val, encryption_version) if log: - logger.log('%s -> %s' % (item_name, my_val), logger.DEBUG) + logger.debug('%s -> %s' % (item_name, my_val)) else: - logger.log('%s -> ******' % item_name, logger.DEBUG) + logger.debug('%s -> ******' % item_name) return (my_val, def_val)['None' == my_val] @@ -497,9 +497,10 @@ class ConfigMigrator(object): if self.config_version > self.expected_config_version: logger.log_error_and_exit( - u'Your config version (%s) has been incremented past what this version of SickGear supports (%s).\n' - 'If you have used other forks or a newer version of SickGear, your config file may be unusable due to ' - 'their modifications.' 
% (self.config_version, self.expected_config_version)) + f'Your config version ({self.config_version})' + f' has been incremented past what this version of SickGear supports ({self.expected_config_version}).\n' + f'If you have used other forks or a newer version of SickGear,' + f' your config file may be unusable due to their modifications.') sickgear.CONFIG_VERSION = self.config_version @@ -511,20 +512,20 @@ class ConfigMigrator(object): else: migration_name = '' - logger.log(u'Backing up config before upgrade') + logger.log('Backing up config before upgrade') if not helpers.backup_versioned_file(sickgear.CONFIG_FILE, self.config_version): - logger.log_error_and_exit(u'Config backup failed, abort upgrading config') + logger.log_error_and_exit('Config backup failed, abort upgrading config') else: - logger.log(u'Proceeding with upgrade') + logger.log('Proceeding with upgrade') # do the migration, expect a method named _migrate_v - logger.log(u'Migrating config up to version %s %s' % (next_version, migration_name)) + logger.log(f'Migrating config up to version {next_version} {migration_name}') getattr(self, '_migrate_v%s' % next_version)() self.config_version = next_version # save new config after migration sickgear.CONFIG_VERSION = self.config_version - logger.log(u'Saving config file to disk') + logger.log('Saving config file to disk') sickgear.save_config() @staticmethod @@ -569,17 +570,17 @@ class ConfigMigrator(object): new_season_format = str(new_season_format).replace('09', '%0S') new_season_format = new_season_format.replace('9', '%S') - logger.log(u'Changed season folder format from %s to %s, prepending it to your naming config' % - (old_season_format, new_season_format)) + logger.log(f'Changed season folder format from {old_season_format} to {new_season_format},' + f' prepending it to your naming config') sickgear.NAMING_PATTERN = new_season_format + os.sep + sickgear.NAMING_PATTERN except (TypeError, ValueError): - logger.log(u'Can not change %s to new season format' % old_season_format, logger.ERROR) + logger.error(f'Can not change {old_season_format} to new season format') # if no shows had it on then don't flatten any shows and don't put season folders in the config else: - logger.log(u'No shows were using season folders before so I am disabling flattening on all shows') + logger.log('No shows were using season folders before so I am disabling flattening on all shows') # don't flatten any shows at all my_db.action('UPDATE tv_shows SET flatten_folders = 0 WHERE 1=1') @@ -672,8 +673,7 @@ class ConfigMigrator(object): try: name, url, key, enabled = cur_provider_data.split('|') except ValueError: - logger.log(u'Skipping Newznab provider string: "%s", incorrect format' % cur_provider_data, - logger.ERROR) + logger.error(f'Skipping Newznab provider string: "{cur_provider_data}", incorrect format') continue cat_ids = '5030,5040,5060' @@ -727,7 +727,7 @@ class ConfigMigrator(object): cur_metadata = metadata.split('|') # if target has the old number of values, do upgrade if 6 == len(cur_metadata): - logger.log(u'Upgrading ' + metadata_name + ' metadata, old value: ' + metadata) + logger.log('Upgrading ' + metadata_name + ' metadata, old value: ' + metadata) cur_metadata.insert(4, '0') cur_metadata.append('0') cur_metadata.append('0') @@ -740,15 +740,15 @@ class ConfigMigrator(object): cur_metadata[4], cur_metadata[3] = cur_metadata[3], '0' # write new format metadata = '|'.join(cur_metadata) - logger.log(u'Upgrading %s metadata, new value: %s' % (metadata_name, metadata)) + 
logger.log(f'Upgrading {metadata_name} metadata, new value: {metadata}') elif 10 == len(cur_metadata): metadata = '|'.join(cur_metadata) - logger.log(u'Keeping %s metadata, value: %s' % (metadata_name, metadata)) + logger.log(f'Keeping {metadata_name} metadata, value: {metadata}') else: - logger.log(u'Skipping %s: "%s", incorrect format' % (metadata_name, metadata), logger.ERROR) + logger.error(f'Skipping {metadata_name}: "{metadata}", incorrect format') metadata = '0|0|0|0|0|0|0|0|0|0' - logger.log(u'Setting %s metadata, new value: %s' % (metadata_name, metadata)) + logger.log(f'Setting {metadata_name} metadata, new value: {metadata}') return metadata diff --git a/sickgear/databases/mainDB.py b/sickgear/databases/mainDB.py index c51e3108..b5fbcc93 100644 --- a/sickgear/databases/mainDB.py +++ b/sickgear/databases/mainDB.py @@ -86,7 +86,7 @@ class MainSanityCheck(db.DBSanityCheck): if 0 < len(cl): self.connection.mass_action(cl) - logger.log(u'Performing a vacuum on the database.', logger.DEBUG) + logger.debug('Performing a vacuum on the database.') self.connection.upgrade_log(fix_msg % 'VACUUM') self.connection.action('VACUUM') self.connection.upgrade_log(fix_msg % 'finished') @@ -111,8 +111,7 @@ class MainSanityCheck(db.DBSanityCheck): for cur_result in sql_result: - logger.log(u'Duplicate show detected! %s: %s count: %s' % ( - column, cur_result[column], cur_result['count']), logger.DEBUG) + logger.debug(f'Duplicate show detected! {column}: {cur_result[column]} count: {cur_result["count"]}') cur_dupe_results = self.connection.select( 'SELECT show_id, ' + column + ' FROM tv_shows WHERE ' + column + ' = ? LIMIT ?', @@ -121,15 +120,15 @@ class MainSanityCheck(db.DBSanityCheck): cl = [] for cur_dupe_id in cur_dupe_results: - logger.log(u'Deleting duplicate show with %s: %s show_id: %s' % ( - column, cur_dupe_id[column], cur_dupe_id['show_id'])) + logger.log(f'Deleting duplicate show with {column}: {cur_dupe_id[column]}' + f' show_id: {cur_dupe_id["show_id"]}') cl.append(['DELETE FROM tv_shows WHERE show_id = ?', [cur_dupe_id['show_id']]]) if 0 < len(cl): self.connection.mass_action(cl) else: - logger.log(u'No duplicate show, check passed') + logger.log('No duplicate show, check passed') def fix_duplicate_episodes(self): @@ -146,9 +145,9 @@ class MainSanityCheck(db.DBSanityCheck): for cur_result in sql_result: - logger.log(u'Duplicate episode detected! prod_id: %s season: %s episode: %s count: %s' % - (cur_result['prod_id'], cur_result['season'], cur_result['episode'], - cur_result['count']), logger.DEBUG) + logger.debug(f'Duplicate episode detected! 
prod_id: {cur_result["prod_id"]}' + f' season: {cur_result["season"]} episode: {cur_result["episode"]}' + f' count: {cur_result["count"]}') cur_dupe_results = self.connection.select( 'SELECT episode_id' @@ -163,14 +162,14 @@ class MainSanityCheck(db.DBSanityCheck): cl = [] for cur_dupe_id in cur_dupe_results: - logger.log(u'Deleting duplicate episode with episode_id: %s' % cur_dupe_id['episode_id']) - cl.append(['DELETE FROM tv_episodes WHERE episode_id = ?', [cur_dupe_id['episode_id']]]) + logger.log(f'Deleting duplicate episode with episode_id: {cur_dupe_id["episode_id"]}') + cl.append(['DELETE FROM tv_episodes WHERE episode_id = ?', [cur_dupe_id["episode_id"]]]) if 0 < len(cl): self.connection.mass_action(cl) else: - logger.log(u'No duplicate episode, check passed') + logger.log('No duplicate episode, check passed') def fix_orphan_episodes(self): @@ -182,16 +181,16 @@ class MainSanityCheck(db.DBSanityCheck): cl = [] for cur_result in sql_result: - logger.log(u'Orphan episode detected! episode_id: %s showid: %s' % ( - cur_result['episode_id'], cur_result['showid']), logger.DEBUG) - logger.log(u'Deleting orphan episode with episode_id: %s' % cur_result['episode_id']) + logger.debug(f'Orphan episode detected! episode_id: {cur_result["episode_id"]}' + f' showid: {cur_result["showid"]}') + logger.log(f'Deleting orphan episode with episode_id: {cur_result["episode_id"]}') cl.append(['DELETE FROM tv_episodes WHERE episode_id = ?', [cur_result['episode_id']]]) if 0 < len(cl): self.connection.mass_action(cl) else: - logger.log(u'No orphan episodes, check passed') + logger.log('No orphan episodes, check passed') def fix_missing_table_indexes(self): if not self.connection.select('PRAGMA index_info("idx_indexer_id")'): @@ -240,9 +239,9 @@ class MainSanityCheck(db.DBSanityCheck): cl = [] for cur_result in sql_result: - logger.log(u'UNAIRED episode detected! episode_id: %s showid: %s' % ( - cur_result['episode_id'], cur_result['showid']), logger.DEBUG) - logger.log(u'Fixing unaired episode status with episode_id: %s' % cur_result['episode_id']) + logger.debug(f'UNAIRED episode detected! episode_id: {cur_result["episode_id"]}' + f' showid: {cur_result["showid"]}') + logger.log(f'Fixing unaired episode status with episode_id: {cur_result["episode_id"]}') cl.append(['UPDATE tv_episodes SET status = ? WHERE episode_id = ?', [common.UNAIRED, cur_result['episode_id']]]) @@ -250,7 +249,7 @@ class MainSanityCheck(db.DBSanityCheck): self.connection.mass_action(cl) else: - logger.log(u'No UNAIRED episodes, check passed') + logger.log('No UNAIRED episodes, check passed') def fix_scene_exceptions(self): @@ -387,21 +386,17 @@ class InitialSchema(db.SchemaUpgrade): if cur_db_version < MIN_DB_VERSION: logger.log_error_and_exit( - u'Your database version (' + str(cur_db_version) - + ') is too old to migrate from what this version of SickGear supports (' - + str(MIN_DB_VERSION) + ').' + "\n" + f'Your database version ({cur_db_version}) is too old to migrate from' + f' what this version of SickGear supports ({MIN_DB_VERSION}).\n' + 'Upgrade using a previous version (tag) build 496 to build 501 of SickGear' - ' first or remove database file to begin fresh.' 
- ) + ' first or remove database file to begin fresh.') if cur_db_version > MAX_DB_VERSION: logger.log_error_and_exit( - u'Your database version (' + str(cur_db_version) - + ') has been incremented past what this version of SickGear supports (' - + str(MAX_DB_VERSION) + ').\n' + f'Your database version ({cur_db_version}) has been incremented past' + f' what this version of SickGear supports ({MAX_DB_VERSION}).\n' + 'If you have used other forks of SickGear,' - ' your database may be unusable due to their modifications.' - ) + ' your database may be unusable due to their modifications.') return self.call_check_db_version() @@ -423,7 +418,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): sql_result = self.connection.select('SELECT episode_id, location, file_size FROM tv_episodes') - self.upgrade_log(u'Adding file size to all episodes in DB, please be patient') + self.upgrade_log('Adding file size to all episodes in DB, please be patient') for cur_result in sql_result: if not cur_result['location']: continue @@ -439,7 +434,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): # noinspection SqlRedundantOrderingDirection history_sql_result = self.connection.select('SELECT * FROM history WHERE provider != -1 ORDER BY date ASC') - self.upgrade_log(u'Adding release name to all episodes still in history') + self.upgrade_log('Adding release name to all episodes still in history') for cur_result in history_sql_result: # find the associated download, if there isn't one then ignore it # noinspection SqlResolve @@ -449,8 +444,8 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): ' WHERE provider = -1 AND showid = ? AND season = ? AND episode = ? AND date > ?', [cur_result['showid'], cur_result['season'], cur_result['episode'], cur_result['date']]) if not download_sql_result: - self.upgrade_log(u'Found a snatch in the history for ' + cur_result['resource'] - + ' but couldn\'t find the associated download, skipping it', logger.DEBUG) + self.upgrade_log(f'Found a snatch in the history for {cur_result["resource"]}' + f' but couldn\'t find the associated download, skipping it', logger.DEBUG) continue nzb_name = cur_result['resource'] @@ -468,9 +463,8 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): ' WHERE showid = ? AND season = ? AND episode = ? 
AND location != ""', [cur_result['showid'], cur_result['season'], cur_result['episode']]) if not sql_result: - logger.log( - u'The episode ' + nzb_name + ' was found in history but doesn\'t exist on disk anymore, skipping', - logger.DEBUG) + logger.debug(f'The episode {nzb_name} was found in history but doesn\'t exist on disk anymore,' + f' skipping') continue # get the status/quality of the existing ep and make sure it's what we expect @@ -483,7 +477,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): # make sure this is actually a real release name and not a season pack or something for cur_name in (nzb_name, file_name): - logger.log(u'Checking if ' + cur_name + ' is actually a good release name', logger.DEBUG) + logger.debug(f'Checking if {cur_name} is actually a good release name') try: np = NameParser(False) parse_result = np.parse(cur_name) @@ -503,7 +497,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): ' FROM tv_episodes' ' WHERE release_name = ""') - self.upgrade_log(u'Adding release name to all episodes with obvious scene filenames') + self.upgrade_log('Adding release name to all episodes with obvious scene filenames') for cur_result in empty_sql_result: ep_file_name = os.path.basename(cur_result['location']) @@ -522,9 +516,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): if not parse_result.release_group: continue - logger.log( - u'Name ' + ep_file_name + ' gave release group of ' + parse_result.release_group + ', seems valid', - logger.DEBUG) + logger.debug(f'Name {ep_file_name} gave release group of {parse_result.release_group}, seems valid') self.connection.action('UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?', [ep_file_name, cur_result['episode_id']]) @@ -651,7 +643,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): common.Quality.UNKNOWN], []) # update qualities (including templates) - self.upgrade_log(u'[1/4] Updating pre-defined templates and the quality for each show...') + self.upgrade_log('[1/4] Updating pre-defined templates and the quality for each show...') cl = [] shows = self.connection.select('SELECT * FROM tv_shows') for cur_show in shows: @@ -666,7 +658,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): # update status that are are within the old hdwebdl # (1<<3 which is 8) and better -- exclude unknown (1<<15 which is 32768) - self.upgrade_log(u'[2/4] Updating the status for the episodes within each show...') + self.upgrade_log('[2/4] Updating the status for the episodes within each show...') cl = [] sql_result = self.connection.select('SELECT * FROM tv_episodes WHERE status < 3276800 AND status >= 800') for cur_result in sql_result: @@ -678,7 +670,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): # may not always coordinate together # update previous history so it shows the correct action - self.upgrade_log(u'[3/4] Updating history to reflect the correct action...') + self.upgrade_log('[3/4] Updating history to reflect the correct action...') cl = [] # noinspection SqlResolve history_action = self.connection.select('SELECT * FROM history WHERE action < 3276800 AND action >= 800') @@ -688,7 +680,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): self.connection.mass_action(cl) # update previous history so it shows the correct quality - self.upgrade_log(u'[4/4] Updating history to reflect the correct quality...') + self.upgrade_log('[4/4] Updating history to reflect the correct quality...') cl = [] # noinspection SqlResolve history_quality = self.connection.select('SELECT * FROM history WHERE quality < 32768 
AND quality >= 8') @@ -700,7 +692,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): self.inc_db_version() # cleanup and reduce db if any previous data was removed - self.upgrade_log(u'Performing a vacuum on the database.', logger.DEBUG) + self.upgrade_log('Performing a vacuum on the database.', logger.DEBUG) self.connection.action('VACUUM') return self.call_check_db_version() @@ -712,10 +704,10 @@ class AddShowidTvdbidIndex(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Checking for duplicate shows before adding unique index.') + self.upgrade_log('Checking for duplicate shows before adding unique index.') MainSanityCheck(self.connection).fix_duplicate_shows('tvdb_id') - self.upgrade_log(u'Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.') + self.upgrade_log('Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.') if not self.has_table('idx_showid'): self.connection.action('CREATE INDEX idx_showid ON tv_episodes (showid);') if not self.has_table('idx_tvdb_id'): @@ -732,7 +724,7 @@ class AddLastUpdateTVDB(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'last_update_tvdb'): - self.upgrade_log(u'Adding column last_update_tvdb to tv_shows') + self.upgrade_log('Adding column last_update_tvdb to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'last_update_tvdb', default=1) @@ -745,7 +737,7 @@ class AddDBIncreaseTo15(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version to v%s' % self.call_check_db_version()) + self.upgrade_log(f'Bumping database version to v{self.call_check_db_version()}') self.inc_db_version() return self.call_check_db_version() @@ -756,7 +748,7 @@ class AddIMDbInfo(db.SchemaUpgrade): db_backed_up = False if not self.has_table('imdb_info'): - self.upgrade_log(u'Creating IMDb table imdb_info') + self.upgrade_log('Creating IMDb table imdb_info') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True @@ -766,7 +758,7 @@ class AddIMDbInfo(db.SchemaUpgrade): ' rating TEXT, votes INTEGER, last_update NUMERIC)') if not self.has_column('tv_shows', 'imdb_id'): - self.upgrade_log(u'Adding IMDb column imdb_id to tv_shows') + self.upgrade_log('Adding IMDb column imdb_id to tv_shows') if not db_backed_up: db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) @@ -786,7 +778,7 @@ class AddProperNamingSupport(db.SchemaUpgrade): return self.set_db_version(5816) if not self.has_column('tv_episodes', 'is_proper'): - self.upgrade_log(u'Adding column is_proper to tv_episodes') + self.upgrade_log('Adding column is_proper to tv_episodes') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_episodes', 'is_proper') @@ -805,7 +797,7 @@ class AddEmailSubscriptionTable(db.SchemaUpgrade): return self.set_db_version(5817) if not self.has_column('tv_shows', 'notify_list'): - self.upgrade_log(u'Adding column notify_list to tv_shows') + self.upgrade_log('Adding column notify_list to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'notify_list', 'TEXT', None) @@ -827,7 +819,7 @@ class AddProperSearch(db.SchemaUpgrade): return 
self.set_db_version(5818) if not self.has_column('info', 'last_proper_search'): - self.upgrade_log(u'Adding column last_proper_search to info') + self.upgrade_log('Adding column last_proper_search to info') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('info', 'last_proper_search', default=1) @@ -839,7 +831,7 @@ class AddProperSearch(db.SchemaUpgrade): class AddDvdOrderOption(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'dvdorder'): - self.upgrade_log(u'Adding column dvdorder to tv_shows') + self.upgrade_log('Adding column dvdorder to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'dvdorder', 'NUMERIC', '0') @@ -851,7 +843,7 @@ class AddDvdOrderOption(db.SchemaUpgrade): class AddSubtitlesSupport(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'subtitles'): - self.upgrade_log(u'Adding subtitles to tv_shows and tv_episodes') + self.upgrade_log('Adding subtitles to tv_shows and tv_episodes') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'subtitles') self.add_column('tv_episodes', 'subtitles', 'TEXT', '') @@ -867,10 +859,10 @@ class ConvertTVShowsToIndexerScheme(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Converting TV Shows table to Indexer Scheme...') + self.upgrade_log('Converting TV Shows table to Indexer Scheme...') if self.has_table('tmp_tv_shows'): - self.upgrade_log(u'Removing temp tv show tables left behind from previous updates...') + self.upgrade_log('Removing temp tv show tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_tv_shows') @@ -908,10 +900,10 @@ class ConvertTVEpisodesToIndexerScheme(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Converting TV Episodes table to Indexer Scheme...') + self.upgrade_log('Converting TV Episodes table to Indexer Scheme...') if self.has_table('tmp_tv_episodes'): - self.upgrade_log(u'Removing temp tv episode tables left behind from previous updates...') + self.upgrade_log('Removing temp tv episode tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_tv_episodes') @@ -949,10 +941,10 @@ class ConvertIMDBInfoToIndexerScheme(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Converting IMDb Info table to Indexer Scheme...') + self.upgrade_log('Converting IMDb Info table to Indexer Scheme...') if self.has_table('tmp_imdb_info'): - self.upgrade_log(u'Removing temp imdb info tables left behind from previous updates...') + self.upgrade_log('Removing temp imdb info tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_imdb_info') @@ -978,10 +970,10 @@ class ConvertInfoToIndexerScheme(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Converting Info table to Indexer Scheme...') + self.upgrade_log('Converting Info table to Indexer Scheme...') if self.has_table('tmp_info'): - self.upgrade_log(u'Removing temp info tables left behind from previous updates...') + 
self.upgrade_log('Removing temp info tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_info') @@ -1005,7 +997,7 @@ class AddArchiveFirstMatchOption(db.SchemaUpgrade): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) if not self.has_column('tv_shows', 'archive_firstmatch'): - self.upgrade_log(u'Adding column archive_firstmatch to tv_shows') + self.upgrade_log('Adding column archive_firstmatch to tv_shows') self.add_column('tv_shows', 'archive_firstmatch', 'NUMERIC', '0') self.inc_db_version() @@ -1020,7 +1012,7 @@ class AddSceneNumbering(db.SchemaUpgrade): if self.has_table('scene_numbering'): self.connection.action('DROP TABLE scene_numbering') - self.upgrade_log(u'Upgrading table scene_numbering ...') + self.upgrade_log('Upgrading table scene_numbering ...') self.connection.action( 'CREATE TABLE scene_numbering (indexer TEXT, indexer_id INTEGER, season INTEGER, episode INTEGER,' ' scene_season INTEGER, scene_episode INTEGER,' @@ -1036,7 +1028,7 @@ class ConvertIndexerToInteger(db.SchemaUpgrade): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) cl = [] - self.upgrade_log(u'Converting Indexer to Integer ...') + self.upgrade_log('Converting Indexer to Integer ...') cl.append(['UPDATE tv_shows SET indexer = ? WHERE LOWER(indexer) = ?', ['1', 'tvdb']]) cl.append(['UPDATE tv_shows SET indexer = ? WHERE LOWER(indexer) = ?', ['2', 'tvrage']]) cl.append(['UPDATE tv_episodes SET indexer = ? WHERE LOWER(indexer) = ?', ['1', 'tvdb']]) @@ -1060,13 +1052,13 @@ class AddRequireAndIgnoreWords(db.SchemaUpgrade): db_backed_up = False if not self.has_column('tv_shows', 'rls_require_words'): - self.upgrade_log(u'Adding column rls_require_words to tv_shows') + self.upgrade_log('Adding column rls_require_words to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True self.add_column('tv_shows', 'rls_require_words', 'TEXT', '') if not self.has_column('tv_shows', 'rls_ignore_words'): - self.upgrade_log(u'Adding column rls_ignore_words to tv_shows') + self.upgrade_log('Adding column rls_ignore_words to tv_shows') if not db_backed_up: db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'rls_ignore_words', 'TEXT', '') @@ -1080,14 +1072,14 @@ class AddSportsOption(db.SchemaUpgrade): def execute(self): db_backed_up = False if not self.has_column('tv_shows', 'sports'): - self.upgrade_log(u'Adding column sports to tv_shows') + self.upgrade_log('Adding column sports to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True self.add_column('tv_shows', 'sports', 'NUMERIC', '0') if self.has_column('tv_shows', 'air_by_date') and self.has_column('tv_shows', 'sports'): # update sports column - self.upgrade_log(u'[4/4] Updating tv_shows to reflect the correct sports value...') + self.upgrade_log('[4/4] Updating tv_shows to reflect the correct sports value...') if not db_backed_up: db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) cl = [] @@ -1108,7 +1100,7 @@ class AddSceneNumberingToTvEpisodes(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding columns scene_season and scene_episode to tvepisodes') + self.upgrade_log('Adding columns scene_season and scene_episode to tvepisodes') 
self.add_column('tv_episodes', 'scene_season', 'NUMERIC', 'NULL') self.add_column('tv_episodes', 'scene_episode', 'NUMERIC', 'NULL') @@ -1121,7 +1113,7 @@ class AddAnimeTVShow(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding column anime to tv_episodes') + self.upgrade_log('Adding column anime to tv_episodes') self.add_column('tv_shows', 'anime', 'NUMERIC', '0') self.inc_db_version() @@ -1133,7 +1125,7 @@ class AddAbsoluteNumbering(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding column absolute_number to tv_episodes') + self.upgrade_log('Adding column absolute_number to tv_episodes') self.add_column('tv_episodes', 'absolute_number', 'NUMERIC', '0') self.inc_db_version() @@ -1145,7 +1137,7 @@ class AddSceneAbsoluteNumbering(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding columns absolute_number and scene_absolute_number to scene_numbering') + self.upgrade_log('Adding columns absolute_number and scene_absolute_number to scene_numbering') self.add_column('scene_numbering', 'absolute_number', 'NUMERIC', '0') self.add_column('scene_numbering', 'scene_absolute_number', 'NUMERIC', '0') @@ -1160,7 +1152,7 @@ class AddAnimeAllowlistBlocklist(db.SchemaUpgrade): cl = [['CREATE TABLE allowlist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)'], ['CREATE TABLE blocklist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)']] - self.upgrade_log(u'Creating tables for anime allow and block lists') + self.upgrade_log('Creating tables for anime allow and block lists') self.connection.mass_action(cl) self.inc_db_version() @@ -1172,7 +1164,7 @@ class AddSceneAbsoluteNumbering2(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding column scene_absolute_number to tv_episodes') + self.upgrade_log('Adding column scene_absolute_number to tv_episodes') self.add_column('tv_episodes', 'scene_absolute_number', 'NUMERIC', '0') self.inc_db_version() @@ -1184,7 +1176,7 @@ class AddXemRefresh(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Creating table xem_refresh') + self.upgrade_log('Creating table xem_refresh') self.connection.action( 'CREATE TABLE xem_refresh (indexer TEXT, indexer_id INTEGER PRIMARY KEY, last_refreshed INTEGER)') @@ -1197,7 +1189,7 @@ class AddSceneToTvShows(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding column scene to tv_shows') + self.upgrade_log('Adding column scene to tv_shows') self.add_column('tv_shows', 'scene', 'NUMERIC', '0') self.inc_db_version() @@ -1212,7 +1204,7 @@ class AddIndexerMapping(db.SchemaUpgrade): if self.has_table('indexer_mapping'): self.connection.action('DROP TABLE indexer_mapping') - self.upgrade_log(u'Adding table indexer_mapping') + self.upgrade_log('Adding table indexer_mapping') self.connection.action( 'CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER, mindexer NUMERIC,' ' PRIMARY KEY (indexer_id, indexer))') @@ -1226,11 +1218,11 @@ class AddVersionToTvEpisodes(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 
'sickbeard.db', self.call_check_db_version())
 
-        self.upgrade_log(u'Adding columns release_group and version to tv_episodes')
+        self.upgrade_log('Adding columns release_group and version to tv_episodes')
         self.add_column('tv_episodes', 'release_group', 'TEXT', '')
         self.add_column('tv_episodes', 'version', 'NUMERIC', '-1')
 
-        self.upgrade_log(u'Adding column version to history')
+        self.upgrade_log('Adding column version to history')
         self.add_column('history', 'version', 'NUMERIC', '-1')
 
         self.inc_db_version()
@@ -1242,7 +1234,7 @@ class BumpDatabaseVersion(db.SchemaUpgrade):
     def execute(self):
         db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
 
-        self.upgrade_log(u'Bumping database version')
+        self.upgrade_log('Bumping database version')
 
         return self.set_db_version(10000)
 
@@ -1252,7 +1244,7 @@ class Migrate41(db.SchemaUpgrade):
     def execute(self):
         db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
 
-        self.upgrade_log(u'Bumping database version')
+        self.upgrade_log('Bumping database version')
 
         return self.set_db_version(10001)
 
@@ -1267,7 +1259,7 @@ class Migrate43(db.SchemaUpgrade):
             if self.has_table(table):
                 db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
                 db_backed_up = True
-                self.upgrade_log(u'Dropping redundant table tmdb_info')
+                self.upgrade_log('Dropping redundant table tmdb_info')
                 # noinspection SqlResolve
                 self.connection.action('DROP TABLE [%s]' % table)
                 db_chg = True
@@ -1276,7 +1268,7 @@ class Migrate43(db.SchemaUpgrade):
             if not db_backed_up:
                 db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
                 db_backed_up = True
-            self.upgrade_log(u'Dropping redundant tmdb_info refs')
+            self.upgrade_log('Dropping redundant tmdb_info refs')
             self.drop_columns('tv_shows', 'tmdb_id')
             db_chg = True
 
@@ -1288,7 +1280,7 @@ class Migrate43(db.SchemaUpgrade):
             self.connection.action('INSERT INTO db_version (db_version) VALUES (0);')
 
         if not db_chg:
-            self.upgrade_log(u'Bumping database version')
+            self.upgrade_log('Bumping database version')
 
         return self.set_db_version(10001)
 
@@ -1298,7 +1290,7 @@ class Migrate4301(db.SchemaUpgrade):
     def execute(self):
         db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
 
-        self.upgrade_log(u'Bumping database version')
+        self.upgrade_log('Bumping database version')
 
         return self.set_db_version(10002)
 
@@ -1308,7 +1300,7 @@ class Migrate4302(db.SchemaUpgrade):
     def execute(self):
         db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
 
-        self.upgrade_log(u'Bumping database version')
+        self.upgrade_log('Bumping database version')
 
        return self.set_db_version(10003)
 
@@ -1318,7 +1310,7 @@ class MigrateUpstream(db.SchemaUpgrade):
     def execute(self):
         db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
 
-        self.upgrade_log(u'Migrate SickBeard db v%s into v15' % str(self.call_check_db_version()).replace('58', ''))
+        self.upgrade_log(f'Migrate SickBeard db v{str(self.call_check_db_version()).replace("58", "")} into v15')
 
         return self.set_db_version(15)
 
@@ -1328,7 +1320,7 @@ class SickGearDatabaseVersion(db.SchemaUpgrade):
     def execute(self):
         db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
 
-        self.upgrade_log(u'Bumping database version to new SickGear standards')
+        self.upgrade_log('Bumping database version to new SickGear standards')
 
         return self.set_db_version(20000)
 
@@ -1338,7 +1330,7 @@ class RemoveDefaultEpStatusFromTvShows(db.SchemaUpgrade):
     def execute(self):
db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Dropping redundant column default_ep_status from tv_shows') + self.upgrade_log('Dropping redundant column default_ep_status from tv_shows') self.drop_columns('tv_shows', 'default_ep_status') return self.set_db_version(10000) @@ -1349,7 +1341,7 @@ class RemoveMinorDBVersion(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Dropping redundant column db_minor_version from db_version') + self.upgrade_log('Dropping redundant column db_minor_version from db_version') self.drop_columns('db_version', 'db_minor_version') return self.set_db_version(10001) @@ -1359,7 +1351,7 @@ class RemoveMinorDBVersion(db.SchemaUpgrade): class RemoveMetadataSub(db.SchemaUpgrade): def execute(self): if self.has_column('tv_shows', 'sub_use_sr_metadata'): - self.upgrade_log(u'Dropping redundant column metadata sub') + self.upgrade_log('Dropping redundant column metadata sub') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.drop_columns('tv_shows', 'sub_use_sr_metadata') @@ -1371,10 +1363,10 @@ class DBIncreaseTo20001(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version to force a backup before new database code') + self.upgrade_log('Bumping database version to force a backup before new database code') self.connection.action('VACUUM') - self.upgrade_log(u'Performed a vacuum on the database', logger.DEBUG) + self.upgrade_log('Performed a vacuum on the database', logger.DEBUG) return self.set_db_version(20001) @@ -1383,7 +1375,7 @@ class DBIncreaseTo20001(db.SchemaUpgrade): class AddTvShowOverview(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'overview'): - self.upgrade_log(u'Adding column overview to tv_shows') + self.upgrade_log('Adding column overview to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'overview', 'TEXT', '') @@ -1394,7 +1386,7 @@ class AddTvShowOverview(db.SchemaUpgrade): class AddTvShowTags(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'tag'): - self.upgrade_log(u'Adding tag to tv_shows') + self.upgrade_log('Adding tag to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'tag', 'TEXT', 'Show List') @@ -1410,7 +1402,7 @@ class ChangeMapIndexer(db.SchemaUpgrade): if self.has_table('indexer_mapping'): self.connection.action('DROP TABLE indexer_mapping') - self.upgrade_log(u'Changing table indexer_mapping') + self.upgrade_log('Changing table indexer_mapping') self.connection.action( 'CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER NOT NULL,' ' mindexer NUMERIC, date NUMERIC NOT NULL DEFAULT 0, status INTEGER NOT NULL DEFAULT 0,' @@ -1422,7 +1414,7 @@ class ChangeMapIndexer(db.SchemaUpgrade): self.upgrade_log('Adding last_run_backlog to info') self.add_column('info', 'last_run_backlog', 'NUMERIC', 1) - self.upgrade_log(u'Moving table scene_exceptions from cache.db to sickbeard.db') + self.upgrade_log('Moving table scene_exceptions from cache.db to sickbeard.db') if self.has_table('scene_exceptions_refresh'): self.connection.action('DROP TABLE scene_exceptions_refresh') self.connection.action('CREATE TABLE scene_exceptions_refresh 
(list TEXT PRIMARY KEY, last_refreshed INTEGER)') @@ -1467,7 +1459,7 @@ class ChangeMapIndexer(db.SchemaUpgrade): class AddShowNotFoundCounter(db.SchemaUpgrade): def execute(self): if not self.has_table('tv_shows_not_found'): - self.upgrade_log(u'Adding table tv_shows_not_found') + self.upgrade_log('Adding table tv_shows_not_found') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action( @@ -1482,7 +1474,7 @@ class AddShowNotFoundCounter(db.SchemaUpgrade): class AddFlagTable(db.SchemaUpgrade): def execute(self): if not self.has_table('flags'): - self.upgrade_log(u'Adding table flags') + self.upgrade_log('Adding table flags') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action('CREATE TABLE flags (flag PRIMARY KEY NOT NULL )') @@ -1494,7 +1486,7 @@ class AddFlagTable(db.SchemaUpgrade): class DBIncreaseTo20007(db.SchemaUpgrade): def execute(self): - self.upgrade_log(u'Bumping database version') + self.upgrade_log('Bumping database version') return self.set_db_version(20007) @@ -1517,7 +1509,7 @@ class AddWatched(db.SchemaUpgrade): self.connection.action('VACUUM') if not self.has_table('tv_episodes_watched'): - self.upgrade_log(u'Adding table tv_episodes_watched') + self.upgrade_log('Adding table tv_episodes_watched') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action( @@ -1561,7 +1553,7 @@ class AddIndexerToTables(db.SchemaUpgrade): for t in [(allowtbl, 'show_id'), (blocktbl, 'show_id'), ('history', 'showid'), ('scene_exceptions', 'indexer_id')]: if not self.has_column(t[0], 'indexer'): - self.upgrade_log(u'Adding TV info support to %s table' % t[0]) + self.upgrade_log(f'Adding TV info support to {t[0]} table') self.add_column(t[0], 'indexer') cl = [] for s_id, i in iteritems(show_ids): diff --git a/sickgear/db.py b/sickgear/db.py index 2e70ba16..c1efaed1 100644 --- a/sickgear/db.py +++ b/sickgear/db.py @@ -132,21 +132,21 @@ class DBConnection(object): :return: success, message """ if not db_supports_backup: - logger.log('this python sqlite3 version doesn\'t support backups', logger.DEBUG) + logger.debug('this python sqlite3 version doesn\'t support backups') return False, 'this python sqlite3 version doesn\'t support backups' if not os.path.isdir(target): - logger.log('Backup target invalid', logger.ERROR) + logger.error('Backup target invalid') return False, 'Backup target invalid' target_db = os.path.join(target, (backup_filename, self.filename)[None is backup_filename]) if os.path.exists(target_db): - logger.log('Backup target file already exists', logger.ERROR) + logger.error('Backup target file already exists') return False, 'Backup target file already exists' # noinspection PyUnusedLocal def progress(status, remaining, total): - logger.log('Copied %s of %s pages...' % (total - remaining, total), logger.DEBUG) + logger.debug('Copied %s of %s pages...' 
% (total - remaining, total)) backup_con = None @@ -156,9 +156,9 @@ class DBConnection(object): with backup_con: with db_lock: self.connection.backup(backup_con, progress=progress) - logger.log('%s backup successful' % self.filename, logger.DEBUG) + logger.debug('%s backup successful' % self.filename) except sqlite3.Error as error: - logger.log("Error while taking backup: %s" % ex(error), logger.ERROR) + logger.error("Error while taking backup: %s" % ex(error)) return False, 'Backup failed' finally: if backup_con: @@ -226,8 +226,8 @@ class DBConnection(object): self.connection.commit() if 0 < affected: - logger.debug(u'Transaction with %s queries executed affected at least %i row%s' % ( - len(queries), affected, helpers.maybe_plural(affected))) + logger.debug(f'Transaction with {len(queries)} queries executed affected at least {affected:d}' + f' row{helpers.maybe_plural(affected)}') return sql_result except sqlite3.OperationalError as e: sql_result = [] @@ -239,7 +239,7 @@ class DBConnection(object): except sqlite3.DatabaseError as e: if self.connection: self.connection.rollback() - logger.error(u'Fatal error executing query: ' + ex(e)) + logger.error(f'Fatal error executing query: {ex(e)}') raise return sql_result @@ -248,10 +248,10 @@ class DBConnection(object): def action_error(e): if 'unable to open database file' in e.args[0] or 'database is locked' in e.args[0]: - logger.log(u'DB error: ' + ex(e), logger.WARNING) + logger.warning(f'DB error: {ex(e)}') time.sleep(1) return True - logger.log(u'DB error: ' + ex(e), logger.ERROR) + logger.error(f'DB error: {ex(e)}') def action(self, query, args=None): # type: (AnyStr, Optional[List, Tuple]) -> Optional[Union[List, sqlite3.Cursor]] @@ -280,7 +280,7 @@ class DBConnection(object): raise attempt += 1 except sqlite3.DatabaseError as e: - logger.log(u'Fatal error executing query: ' + ex(e), logger.ERROR) + logger.error(f'Fatal error executing query: {ex(e)}') raise return sql_result @@ -424,7 +424,7 @@ class DBSanityCheck(object): def upgrade_database(connection, schema): - logger.log(u'Checking database structure...', logger.MESSAGE) + logger.log('Checking database structure...', logger.MESSAGE) connection.is_upgrading = False connection.new_db = 0 == connection.check_db_version() _process_upgrade(connection, schema) @@ -438,16 +438,16 @@ def _pretty_name(class_name): def _restore_database(filename, version): - logger.log(u'Restoring database before trying upgrade again') + logger.log('Restoring database before trying upgrade again') if not sickgear.helpers.restore_versioned_file(db_filename(filename=filename, suffix='v%s' % version), version): - logger.log_error_and_exit(u'Database restore failed, abort upgrading database') + logger.log_error_and_exit('Database restore failed, abort upgrading database') return False return True def _process_upgrade(connection, upgrade_class): instance = upgrade_class(connection) - logger.log('Checking %s database upgrade' % _pretty_name(upgrade_class.__name__), logger.DEBUG) + logger.debug('Checking %s database upgrade' % _pretty_name(upgrade_class.__name__)) if not instance.test(): connection.is_upgrading = True connection.upgrade_log(getattr(upgrade_class, 'pretty_name', None) or _pretty_name(upgrade_class.__name__)) @@ -471,9 +471,9 @@ def _process_upgrade(connection, upgrade_class): else: logger.log_error_and_exit('Database upgrade failed, can\'t determine old db version, not restoring.') - logger.log('%s upgrade completed' % upgrade_class.__name__, logger.DEBUG) + logger.debug('%s upgrade completed' % 
upgrade_class.__name__) else: - logger.log('%s upgrade not required' % upgrade_class.__name__, logger.DEBUG) + logger.debug('%s upgrade not required' % upgrade_class.__name__) for upgradeSubClass in upgrade_class.__subclasses__(): _process_upgrade(connection, upgradeSubClass) @@ -710,15 +710,15 @@ def migration_code(my_db): db_version = my_db.check_db_version() my_db.new_db = 0 == db_version - logger.log(u'Detected database version: v%s' % db_version, logger.DEBUG) + logger.debug(f'Detected database version: v{db_version}') if not (db_version in schema): if db_version == sickgear.mainDB.MAX_DB_VERSION: - logger.log(u'Database schema is up-to-date, no upgrade required') + logger.log('Database schema is up-to-date, no upgrade required') elif 10000 > db_version: - logger.log_error_and_exit(u'SickGear does not currently support upgrading from this database version') + logger.log_error_and_exit('SickGear does not currently support upgrading from this database version') else: - logger.log_error_and_exit(u'Invalid database version') + logger.log_error_and_exit('Invalid database version') else: @@ -733,13 +733,13 @@ def migration_code(my_db): cleanup_old_db_backups(my_db.filename) except (BaseException, Exception) as e: my_db.close() - logger.log(u'Failed to update database with error: %s attempting recovery...' % ex(e), logger.ERROR) + logger.error(f'Failed to update database with error: {ex(e)} attempting recovery...') if _restore_database(my_db.filename, db_version): # initialize the main SB database - logger.log_error_and_exit(u'Successfully restored database version: %s' % db_version) + logger.log_error_and_exit(f'Successfully restored database version: {db_version}') else: - logger.log_error_and_exit(u'Failed to restore database version: %s' % db_version) + logger.log_error_and_exit(f'Failed to restore database version: {db_version}') my_db.upgrade_log('Finished') @@ -765,11 +765,11 @@ def backup_database(db_connection, filename, version): logger.debug('new db, no backup required') return - logger.log(u'Backing up database before upgrade') + logger.log('Backing up database before upgrade') if not sickgear.helpers.backup_versioned_file(db_filename(filename), version): - logger.log_error_and_exit(u'Database backup failed, abort upgrading database') + logger.log_error_and_exit('Database backup failed, abort upgrading database') else: - logger.log(u'Proceeding with upgrade') + logger.log('Proceeding with upgrade') def get_rollback_module(): @@ -836,7 +836,7 @@ def backup_all_dbs(target, compress=True, prefer_7z=True): :return: success, message """ if not make_path(target): - logger.log('Failed to create db backup dir', logger.ERROR) + logger.error('Failed to create db backup dir') return False, 'Failed to create db backup dir' my_db = DBConnection('cache.db') last_backup = my_db.select('SELECT time FROM lastUpdate WHERE provider = ?', ['sickgear_db_backup']) diff --git a/sickgear/failedProcessor.py b/sickgear/failedProcessor.py index e1e6a40b..7af0483b 100644 --- a/sickgear/failedProcessor.py +++ b/sickgear/failedProcessor.py @@ -67,30 +67,33 @@ class FailedProcessor(LegacyFailedProcessor): :return: success :type: bool or None """ - self._log(u'Failed download detected: (%s, %s)' % (self.nzb_name, self.dir_name)) + self._log(f'Failed download detected: ({self.nzb_name}, {self.dir_name})') release_name = show_name_helpers.determine_release_name(self.dir_name, self.nzb_name) if None is release_name: - self._log(u'Warning: unable to find a valid release name.', logger.WARNING) + self._log('Warning: 
unable to find a valid release name.', logger.WARNING) raise exceptions_helper.FailedProcessingFailed() try: parser = NameParser(False, show_obj=self.show_obj, convert=True) parsed = parser.parse(release_name) except InvalidNameException: - self._log(u'Error: release name is invalid: ' + release_name, logger.DEBUG) + self._log(f'Error: release name is invalid: {release_name}', logger.DEBUG) raise exceptions_helper.FailedProcessingFailed() except InvalidShowException: - self._log(u'Error: unable to parse release name %s into a valid show' % release_name, logger.DEBUG) + self._log(f'Error: unable to parse release name {release_name} into a valid show', logger.DEBUG) raise exceptions_helper.FailedProcessingFailed() - logger.log(u"name_parser info: ", logger.DEBUG) - logger.log(u" - " + str(parsed.series_name), logger.DEBUG) - logger.log(u" - " + str(parsed.season_number), logger.DEBUG) - logger.log(u" - " + str(parsed.episode_numbers), logger.DEBUG) - logger.log(u" - " + str(parsed.extra_info), logger.DEBUG) - logger.log(u" - " + str(parsed.release_group), logger.DEBUG) - logger.log(u" - " + str(parsed.air_date), logger.DEBUG) + for cur_msg in ( + 'name_parser info: ', + f' - {parsed.series_name}', + f' - {parsed.season_number}', + f' - {parsed.episode_numbers}', + f' - {parsed.extra_info}', + f' - {parsed.release_group}', + f' - {parsed.air_date}' + ): + logger.debug(cur_msg) for episode in parsed.episode_numbers: segment = parsed.show_obj.get_episode(parsed.season_number, episode) diff --git a/sickgear/failed_history.py b/sickgear/failed_history.py index 8edc87fe..9afa6884 100644 --- a/sickgear/failed_history.py +++ b/sickgear/failed_history.py @@ -99,21 +99,20 @@ def add_failed(release): sql_result = db_select('SELECT * FROM history t WHERE t.release=?', [release]) if not any(sql_result): - logger.log('Release not found in failed.db snatch history', logger.WARNING) + logger.warning('Release not found in failed.db snatch history') elif 1 < len(sql_result): - logger.log('Multiple logged snatches found for release in failed.db', logger.WARNING) + logger.warning('Multiple logged snatches found for release in failed.db') sizes = len(set([x['size'] for x in sql_result])) providers = len(set([x['provider'] for x in sql_result])) if 1 == sizes: - logger.log('However, they\'re all the same size. Continuing with found size', logger.WARNING) + logger.warning('However, they\'re all the same size. Continuing with found size') size = sql_result[0]['size'] else: - logger.log( - 'They also vary in size. Deleting logged snatches and recording this release with no size/provider', - logger.WARNING) + logger.warning( + 'They also vary in size. Deleting logged snatches and recording this release with no size/provider') for cur_result in sql_result: remove_snatched(cur_result['release'], cur_result['size'], cur_result['provider']) @@ -165,7 +164,7 @@ def set_episode_failed(ep_obj): ep_obj.save_to_db() except EpisodeNotFoundException as e: - logger.log('Unable to get episode, please set its status manually: %s' % ex(e), logger.WARNING) + logger.warning('Unable to get episode, please set its status manually: %s' % ex(e)) def remove_failed(release): @@ -237,13 +236,13 @@ def revert_episode(ep_obj): else: status_revert = WANTED - logger.log('Episode not found in failed.db history. Setting it to WANTED', logger.WARNING) + logger.warning('Episode not found in failed.db history. 
Setting it to WANTED') ep_obj.status = status_revert ep_obj.save_to_db() except EpisodeNotFoundException as e: - logger.log('Unable to create episode, please set its status manually: %s' % ex(e), logger.WARNING) + logger.warning('Unable to create episode, please set its status manually: %s' % ex(e)) def find_old_status(ep_obj): @@ -289,8 +288,7 @@ def find_release(ep_obj): db_action('DELETE FROM history WHERE %s=? AND %s!=?' % ('`release`', '`date`'), [release, r['date']]) # Found a previously failed release - logger.log('Found failed.db history release %sx%s: [%s]' % ( - ep_obj.season, ep_obj.episode, release), logger.DEBUG) + logger.debug(f'Found failed.db history release {ep_obj.season}x{ep_obj.episode}: [{release}]') else: release = None provider = None diff --git a/sickgear/generic_queue.py b/sickgear/generic_queue.py index d57ebcbe..11c77bbe 100644 --- a/sickgear/generic_queue.py +++ b/sickgear/generic_queue.py @@ -89,7 +89,7 @@ class GenericQueue(object): my_db = db.DBConnection('cache.db') my_db.mass_action(cl) except (BaseException, Exception) as e: - logger.log('Exception saving queue %s to db: %s' % (self.__class__.__name__, ex(e)), logger.ERROR) + logger.error('Exception saving queue %s to db: %s' % (self.__class__.__name__, ex(e))) def _clear_sql(self): # type: (...) -> List[List] @@ -103,7 +103,7 @@ class GenericQueue(object): my_db = db.DBConnection('cache.db') my_db.mass_action(item_sql) except (BaseException, Exception) as e: - logger.log('Exception saving item %s to db: %s' % (item, ex(e)), logger.ERROR) + logger.error('Exception saving item %s to db: %s' % (item, ex(e))) def delete_item(self, item, finished_run=False): # type: (Union[QueueItem, CastQueueItem], bool) -> None @@ -119,7 +119,7 @@ class GenericQueue(object): my_db = db.DBConnection('cache.db') my_db.mass_action(item_sql) except (BaseException, Exception) as e: - logger.log('Exception deleting item %s from db: %s' % (item, ex(e)), logger.ERROR) + logger.error('Exception deleting item %s from db: %s' % (item, ex(e))) def _get_item_sql(self, item): # type: (Union[QueueItem, CastQueueItem]) -> List[List] @@ -211,12 +211,12 @@ class GenericQueue(object): my_db.mass_action(del_main_sql) def pause(self): - logger.log(u'Pausing queue') + logger.log('Pausing queue') if self.lock: self.min_priority = 999999999999 def unpause(self): - logger.log(u'Unpausing queue') + logger.log('Unpausing queue') with self.lock: self.min_priority = 0 @@ -258,7 +258,7 @@ class GenericQueue(object): if 0 == len(self.events[event_type]): del self.events[event_type] except (BaseException, Exception) as e: - logger.log('Error removing event method from queue: %s' % ex(e), logger.ERROR) + logger.error('Error removing event method from queue: %s' % ex(e)) def execute_events(self, event_type, *args, **kwargs): # type: (int, Tuple, Dict) -> None @@ -267,7 +267,7 @@ class GenericQueue(object): try: event(*args, **kwargs) except (BaseException, Exception) as e: - logger.log('Error executing Event: %s' % ex(e), logger.ERROR) + logger.error('Error executing Event: %s' % ex(e)) def run(self): diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 3fc8c499..4cb670a3 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -345,7 +345,7 @@ def list_media_files(path): result = [] if path: if [direntry for direntry in scantree(path, include=[r'\.sickgearignore'], filter_kind=False, recurse=False)]: - logger.log('Skipping folder "%s" because it contains ".sickgearignore"' % path, logger.DEBUG) + logger.debug('Skipping folder "%s" because it 
contains ".sickgearignore"' % path) else: result = [direntry.path for direntry in scantree(path, exclude=['Extras'], filter_kind=False) if has_media_ext(direntry.name)] @@ -405,8 +405,7 @@ def hardlink_file(src_file, dest_file): link(src_file, dest_file) fix_set_group_id(dest_file) except (BaseException, Exception) as e: - logger.log(u"Failed to create hardlink of %s at %s: %s. Copying instead." % (src_file, dest_file, ex(e)), - logger.ERROR) + logger.error(f'Failed to create hardlink of {src_file} at {dest_file}: {ex(e)}. Copying instead.') copy_file(src_file, dest_file) @@ -441,7 +440,7 @@ def move_and_symlink_file(src_file, dest_file): fix_set_group_id(dest_file) symlink(dest_file, src_file) except (BaseException, Exception): - logger.log(u"Failed to create symlink of %s at %s. Copying instead" % (src_file, dest_file), logger.ERROR) + logger.error(f'Failed to create symlink of {src_file} at {dest_file}. Copying instead') copy_file(src_file, dest_file) @@ -488,10 +487,10 @@ def rename_ep_file(cur_path, new_path, old_path_length=0): # move the file try: - logger.log(u'Renaming file from %s to %s' % (cur_path, new_path)) + logger.log(f'Renaming file from {cur_path} to {new_path}') shutil.move(cur_path, new_path) except (OSError, IOError) as e: - logger.log(u"Failed renaming " + cur_path + " to " + new_path + ": " + ex(e), logger.ERROR) + logger.error(f'Failed renaming {cur_path} to {new_path}: {ex(e)}') return False # clean up any old folders that are empty @@ -513,7 +512,7 @@ def delete_empty_folders(check_empty_dir, keep_dir=None): # treat check_empty_dir as empty when it only contains these items ignore_items = [] - logger.log(u"Trying to clean any empty folders under " + check_empty_dir) + logger.log(f'Trying to clean any empty folders under {check_empty_dir}') # as long as the folder exists and doesn't contain any files, delete it while os.path.isdir(check_empty_dir) and check_empty_dir != keep_dir: @@ -523,13 +522,13 @@ def delete_empty_folders(check_empty_dir, keep_dir=None): [check_file in ignore_items for check_file in check_files])): # directory is empty or contains only ignore_items try: - logger.log(u"Deleting empty folder: " + check_empty_dir) + logger.log(f"Deleting empty folder: {check_empty_dir}") # need shutil.rmtree when ignore_items is really implemented os.rmdir(check_empty_dir) # do a Synology library update notifiers.NotifierFactory().get('SYNOINDEX').deleteFolder(check_empty_dir) except OSError as e: - logger.log(u"Unable to delete " + check_empty_dir + ": " + repr(e) + " / " + ex(e), logger.WARNING) + logger.warning(f'Unable to delete {check_empty_dir}: {repr(e)} / {ex(e)}') break check_empty_dir = os.path.dirname(check_empty_dir) else: @@ -559,9 +558,7 @@ def get_absolute_number_from_season_and_episode(show_obj, season, episode): if 1 == len(sql_result): absolute_number = int(sql_result[0]["absolute_number"]) - logger.log( - "Found absolute_number:" + str(absolute_number) + " by " + str(season) + "x" + str(episode), - logger.DEBUG) + logger.debug(f'Found absolute_number:{absolute_number} by {season}x{episode}') else: logger.debug('No entries for absolute number in show: %s found using %sx%s' % (show_obj.unique_name, str(season), str(episode))) @@ -600,7 +597,7 @@ def sanitize_scene_name(name): :rtype: AnyStr """ if name: - bad_chars = u',:()£\'!?\u2019' + bad_chars = ',:()£\'!?\u2019' # strip out any bad chars name = re.sub(r'[%s]' % bad_chars, '', name, flags=re.U) @@ -654,7 +651,7 @@ def parse_xml(data, del_xmlns=False): try: parsed_xml = etree.fromstring(data) 
except (BaseException, Exception) as e: - logger.log(u"Error trying to parse xml data. Error: " + ex(e), logger.DEBUG) + logger.debug(f"Error trying to parse xml data. Error: {ex(e)}") parsed_xml = None return parsed_xml @@ -686,28 +683,28 @@ def backup_versioned_file(old_file, version): except (BaseException, Exception): if os.path.isfile(new_file): continue - logger.log('could not rename old backup db file', logger.WARNING) + logger.warning('could not rename old backup db file') if not changed_old_db: raise Exception('can\'t create a backup of db') while not os.path.isfile(new_file): if not os.path.isfile(old_file) or 0 == get_size(old_file): - logger.log(u'No need to create backup', logger.DEBUG) + logger.debug('No need to create backup') break try: - logger.log(u'Trying to back up %s to %s' % (old_file, new_file), logger.DEBUG) + logger.debug(f'Trying to back up {old_file} to {new_file}') shutil.copy(old_file, new_file) - logger.log(u'Backup done', logger.DEBUG) + logger.debug('Backup done') break except (BaseException, Exception) as e: - logger.log(u'Error while trying to back up %s to %s : %s' % (old_file, new_file, ex(e)), logger.WARNING) + logger.warning(f'Error while trying to back up {old_file} to {new_file} : {ex(e)}') num_tries += 1 time.sleep(3) - logger.log(u'Trying again.', logger.DEBUG) + logger.debug('Trying again.') if 3 <= num_tries: - logger.log(u'Unable to back up %s to %s please do it manually.' % (old_file, new_file), logger.ERROR) + logger.error(f'Unable to back up {old_file} to {new_file} please do it manually.') return False return True @@ -729,39 +726,34 @@ def restore_versioned_file(backup_file, version): restore_file = new_file + '.' + 'v' + str(version) if not os.path.isfile(new_file): - logger.log(u"Not restoring, " + new_file + " doesn't exist", logger.DEBUG) + logger.debug(f'Not restoring, {new_file} doesn\'t exist') return False try: - logger.log( - u"Trying to backup " + new_file + " to " + new_file + "." + "r" + str(version) + " before restoring backup", - logger.DEBUG) + logger.debug(f'Trying to backup {new_file} to {new_file}.r{version} before restoring backup') shutil.move(new_file, new_file + '.' 
+ 'r' + str(version)) except (BaseException, Exception) as e: - logger.log( - u"Error while trying to backup DB file " + restore_file + " before proceeding with restore: " + ex(e), - logger.WARNING) + logger.warning(f'Error while trying to backup DB file {restore_file} before proceeding with restore: {ex(e)}') return False while not os.path.isfile(new_file): if not os.path.isfile(restore_file): - logger.log(u"Not restoring, " + restore_file + " doesn't exist", logger.DEBUG) + logger.debug(f'Not restoring, {restore_file} doesn\'t exist') break try: - logger.log(u"Trying to restore " + restore_file + " to " + new_file, logger.DEBUG) + logger.debug(f'Trying to restore {restore_file} to {new_file}') shutil.copy(restore_file, new_file) - logger.log(u"Restore done", logger.DEBUG) + logger.debug('Restore done') break except (BaseException, Exception) as e: - logger.log(u"Error while trying to restore " + restore_file + ": " + ex(e), logger.WARNING) + logger.warning(f'Error while trying to restore {restore_file}: {ex(e)}') num_tries += 1 time.sleep(1) - logger.log(u"Trying again.", logger.DEBUG) + logger.debug('Trying again.') if 10 <= num_tries: - logger.log(u"Unable to restore " + restore_file + " to " + new_file + " please do it manually.", - logger.ERROR) + logger.error(f'Unable to restore {restore_file} to {new_file} please do it manually.') return False return True @@ -963,7 +955,7 @@ def get_show(name, try_scene_exceptions=False): if tvid and prodid: show_obj = find_show_by_id({tvid: prodid}) except (BaseException, Exception) as e: - logger.log(u'Error when attempting to find show: ' + name + ' in SickGear: ' + ex(e), logger.DEBUG) + logger.debug(f'Error when attempting to find show: {name} in SickGear: {ex(e)}') return show_obj @@ -1051,8 +1043,9 @@ def clear_cache(force=False): except OSError: dirty = True - logger.log(u'%s from cache folder %s' % ((('Found items not removed', 'Found items removed')[not dirty], - 'No items found to remove')[None is dirty], sickgear.CACHE_DIR)) + logger.log( + f'{(("Found items not removed", "Found items removed")[not dirty], "No items found to remove")[None is dirty]}' + f' from cache folder {sickgear.CACHE_DIR}') def human(size): @@ -1298,7 +1291,7 @@ def make_search_segment_html_string(segment, max_eps=5): segment = [segment] if segment and len(segment) > max_eps: seasons = [x for x in set([x.season for x in segment])] - seg_str = u'Season%s: ' % maybe_plural(len(seasons)) + seg_str = f'Season{maybe_plural(len(seasons))}: ' divider = '' for x in seasons: eps = [str(s.episode) for s in segment if x == s.season] @@ -1308,7 +1301,7 @@ def make_search_segment_html_string(segment, max_eps=5): divider = ', ' elif segment: episode_numbers = ['S%sE%s' % (str(x.season).zfill(2), str(x.episode).zfill(2)) for x in segment] - seg_str = u'Episode%s: %s' % (maybe_plural(len(episode_numbers)), ', '.join(episode_numbers)) + seg_str = f'Episode{maybe_plural(len(episode_numbers))}: {", ".join(episode_numbers)}' return seg_str @@ -1394,7 +1387,7 @@ def should_delete_episode(status): s = Quality.split_composite_status(status)[0] if s not in SNATCHED_ANY + [DOWNLOADED, ARCHIVED, IGNORED]: return True - logger.log('not safe to delete episode from db because of status: %s' % statusStrings[s], logger.DEBUG) + logger.debug('not safe to delete episode from db because of status: %s' % statusStrings[s]) return False @@ -1573,7 +1566,7 @@ def count_files_dirs(base_dir): try: files = scandir(base_dir) except OSError as e: - logger.log('Unable to count files %s / %s' % (repr(e), 
ex(e)), logger.WARNING) + logger.warning('Unable to count files %s / %s' % (repr(e), ex(e))) else: for e in files: if e.is_file(): @@ -1643,8 +1636,8 @@ def upgrade_new_naming(): try: move_file(entry.path, new_name) except (BaseException, Exception) as e: - logger.log('Unable to rename %s to %s: %s / %s' - % (entry.path, new_name, repr(e), ex(e)), logger.WARNING) + logger.warning('Unable to rename %s to %s: %s / %s' + % (entry.path, new_name, repr(e), ex(e))) else: # clean up files without reference in db try: @@ -1664,7 +1657,7 @@ def upgrade_new_naming(): try: entries = scandir(entry.path) except OSError as e: - logger.log('Unable to stat dirs %s / %s' % (repr(e), ex(e)), logger.WARNING) + logger.warning('Unable to stat dirs %s / %s' % (repr(e), ex(e))) continue for d_entry in entries: if d_entry.is_dir(): @@ -1679,14 +1672,13 @@ def upgrade_new_naming(): try: move_file(d_entry.path, new_dir_name) except (BaseException, Exception) as e: - logger.log('Unable to rename %s to %s: %s / %s' % - (d_entry.path, new_dir_name, repr(e), ex(e)), logger.WARNING) + logger.warning(f'Unable to rename {d_entry.path} to {new_dir_name}:' + f' {repr(e)} / {ex(e)}') if os.path.isdir(new_dir_name): try: f_n = filter(lambda fn: fn.is_file(), scandir(new_dir_name)) except OSError as e: - logger.log('Unable to rename %s / %s' % (repr(e), ex(e)), - logger.WARNING) + logger.warning('Unable to rename %s / %s' % (repr(e), ex(e))) else: rename_args = [] # noinspection PyTypeChecker @@ -1697,8 +1689,8 @@ def upgrade_new_naming(): try: move_file(*args) except (BaseException, Exception) as e: - logger.log('Unable to rename %s to %s: %s / %s' % - (args[0], args[1], repr(e), ex(e)), logger.WARNING) + logger.warning(f'Unable to rename {args[0]} to {args[1]}:' + f' {repr(e)} / {ex(e)}') else: try: shutil.rmtree(d_entry.path) @@ -1754,11 +1746,11 @@ def normalise_chars(text): :return: Text with entities replaced :rtype: AnyStr """ - result = text.replace(u'\u2010', u'-').replace(u'\u2011', u'-').replace(u'\u2012', u'-') \ - .replace(u'\u2013', u'-').replace(u'\u2014', u'-').replace(u'\u2015', u'-') \ - .replace(u'\u2018', u"'").replace(u'\u2019', u"'") \ - .replace(u'\u201c', u'\"').replace(u'\u201d', u'\"') \ - .replace(u'\u0020', u' ').replace(u'\u00a0', u' ') + result = text.replace('\u2010', '-').replace('\u2011', '-').replace('\u2012', '-') \ + .replace('\u2013', '-').replace('\u2014', '-').replace('\u2015', '-') \ + .replace('\u2018', "'").replace('\u2019', "'") \ + .replace('\u201c', '\"').replace('\u201d', '\"') \ + .replace('\u0020', ' ').replace('\u00a0', ' ') return result diff --git a/sickgear/image_cache.py b/sickgear/image_cache.py index f0372358..10fe08da 100644 --- a/sickgear/image_cache.py +++ b/sickgear/image_cache.py @@ -277,9 +277,9 @@ class ImageCache(object): result = [] for filename in glob.glob(image_file): result.append(os.path.isfile(filename) and filename) - logger.log(u'Found cached %s' % filename, logger.DEBUG) + logger.debug(f'Found cached {filename}') - not any(result) and logger.log(u'No cache for %s' % image_file, logger.DEBUG) + not any(result) and logger.debug(f'No cache for {image_file}') return any(result) def has_poster(self, tvid, prodid): @@ -365,7 +365,7 @@ class ImageCache(object): :param is_binary: is data instead of path """ if not is_binary and not os.path.isfile(image): - logger.warning(u'File not found to determine image type of %s' % image) + logger.warning(f'File not found to determine image type of {image}') return if not image: logger.warning('No Image Data to determinate 
image type') @@ -381,7 +381,7 @@ class ImageCache(object): img_parser.parse_photoshop_content = False img_metadata = extractMetadata(img_parser) except (BaseException, Exception) as e: - logger.debug(u'Unable to extract metadata from %s, not using file. Error: %s' % (image, ex(e))) + logger.debug(f'Unable to extract metadata from {image}, not using file. Error: {ex(e)}') return if not img_metadata: @@ -389,7 +389,7 @@ class ImageCache(object): msg = 'Image Data' else: msg = image - logger.debug(u'Unable to extract metadata from %s, not using file' % msg) + logger.debug(f'Unable to extract metadata from {msg}, not using file') return width = img_metadata.get('width') @@ -441,9 +441,9 @@ class ImageCache(object): logger.debug(msg_success % 'fanart') return self.FANART - logger.warning(u'Skipped image with fanart aspect ratio but less than 500 pixels wide') + logger.warning('Skipped image with fanart aspect ratio but less than 500 pixels wide') else: - logger.warning(u'Skipped image with useless ratio %s' % img_ratio) + logger.warning(f'Skipped image with useless ratio {img_ratio}') def should_refresh(self, image_type=None, provider='local'): # type: (int, Optional[AnyStr]) -> bool @@ -522,13 +522,13 @@ class ImageCache(object): dest_path = self.fanart_path(*id_args + (prefix,)).replace('.fanart.jpg', '.%s.fanart.jpg' % crc) fanart_dir = [self._fanart_dir(*id_args)] else: - logger.log(u'Invalid cache image type: ' + str(img_type), logger.ERROR) + logger.error(f'Invalid cache image type: {img_type}') return False for cache_dir in [self.shows_dir, self._thumbnails_dir(*id_args)] + fanart_dir: sg_helpers.make_path(cache_dir) - logger.log(u'%sing from %s to %s' % (('Copy', 'Mov')[move_file], image_path, dest_path)) + logger.log(f'{("Copy", "Mov")[move_file]}ing from {image_path} to {dest_path}') # copy poster, banner as thumb, even if moved we need to duplicate the images if img_type in (self.POSTER, self.BANNER) and dest_thumb_path: sg_helpers.copy_file(image_path, dest_thumb_path) @@ -574,7 +574,7 @@ class ImageCache(object): img_type_name = 'banner_thumb' dest_path = self.banner_thumb_path(*arg_tvid_prodid) else: - logger.log(u'Invalid cache image type: ' + str(img_type), logger.ERROR) + logger.error(f'Invalid cache image type: {img_type}') return False # retrieve the image from TV info source using the generic metadata class @@ -625,10 +625,9 @@ class ImageCache(object): if num_files > max_files: break total = len(glob.glob(dest_path)) - logger.log(u'Saved %s fanart images%s. Cached %s of max %s fanart file%s' - % (success, - ('', ' from ' + ', '.join([x for x in list(set(sources))]))[0 < len(sources)], - total, sickgear.FANART_LIMIT, sg_helpers.maybe_plural(total))) + logger.log(f'Saved {success} fanart images' + f'{("", " from " + ", ".join([x for x in list(set(sources))]))[0 < len(sources)]}.' + f' Cached {total} of max {sickgear.FANART_LIMIT} fanart file{sg_helpers.maybe_plural(total)}') return bool(success) image_urls = metadata_generator.retrieve_show_image(img_type_name, show_obj, return_links=True, @@ -656,7 +655,7 @@ class ImageCache(object): break if result: - logger.log(u'Saved image type %s' % img_type_name) + logger.log(f'Saved image type {img_type_name}') return result def fill_cache(self, show_obj, force=False): @@ -683,7 +682,7 @@ class ImageCache(object): self.BANNER_THUMB: not self.has_banner_thumbnail(*arg_tvid_prodid) or force} if not any(itervalues(need_images)): - logger.log(u'%s: No new cache images needed. Done.' 
% show_obj.tvid_prodid) + logger.log(f'{show_obj.tvid_prodid}: No new cache images needed. Done.') return show_infos = GenericMetadata.gen_show_infos_dict(show_obj) @@ -698,7 +697,7 @@ class ImageCache(object): del (sickgear.FANART_RATINGS[show_obj.tvid_prodid]) result = sg_helpers.remove_file(cache_dir, tree=True) if result: - logger.log(u'%s cache file %s' % (result, cache_dir), logger.DEBUG) + logger.debug(f'{result} cache file {cache_dir}') try: checked_files = [] @@ -718,7 +717,7 @@ class ImageCache(object): if 0 == len(needed): break - logger.log(u'Checking for images from optional %s metadata' % cur_provider.name, logger.DEBUG) + logger.debug(f'Checking for images from optional {cur_provider.name} metadata') for all_meta_provs, path_file in needed: checked_files += [path_file] @@ -735,9 +734,10 @@ class ImageCache(object): if None is cur_file_type: continue - logger.log(u'Checking if image %s (type %s needs metadata: %s)' - % (cache_file_name, str(cur_file_type), - ('No', 'Yes')[True is need_images[cur_file_type]]), logger.DEBUG) + logger.debug(f'Checking if image {cache_file_name} ' + f'(type {str(cur_file_type)}' + f' needs metadata: {("No", "Yes")[True is need_images[cur_file_type]]}' + f')') if need_images.get(cur_file_type): need_images[cur_file_type] = ( @@ -746,8 +746,8 @@ class ImageCache(object): if self.FANART == cur_file_type and \ (not sickgear.FANART_LIMIT or sickgear.FANART_LIMIT < need_images[cur_file_type]): continue - logger.log(u'Caching image found in the show directory to the image cache: %s, type %s' - % (cache_file_name, cur_file_type), logger.DEBUG) + logger.debug(f'Caching image found in the show directory to the image cache: {cache_file_name},' + f' type {cur_file_type}') self._cache_image_from_file( cache_file_name, cur_file_type, @@ -755,7 +755,7 @@ class ImageCache(object): isinstance(need_images[cur_file_type], bool)],)) except exceptions_helper.ShowDirNotFoundException: - logger.log(u'Unable to search for images in show directory because it doesn\'t exist', logger.WARNING) + logger.warning('Unable to search for images in show directory because it doesn\'t exist') # download images from TV info sources for image_type, name_type in [[self.POSTER, 'Poster'], [self.BANNER, 'Banner'], [self.FANART, 'Fanart']]: @@ -763,12 +763,12 @@ class ImageCache(object): if not max_files or max_files < need_images[image_type]: continue - logger.log(u'Seeing if we still need an image of type %s: %s' - % (name_type, ('No', 'Yes')[True is need_images[image_type]]), logger.DEBUG) + logger.debug(f'Seeing if we still need an image of type {name_type}:' + f' {("No", "Yes")[True is need_images[image_type]]}') if need_images[image_type]: file_num = (need_images[image_type] + 1, 1)[isinstance(need_images[image_type], bool)] if file_num <= max_files: self._cache_info_source_images(show_obj, image_type, file_num, max_files, force=force, show_infos=show_infos) - logger.log(u'Done cache check') + logger.log('Done cache check') diff --git a/sickgear/indexermapper.py b/sickgear/indexermapper.py index 38d0f022..fcb924c2 100644 --- a/sickgear/indexermapper.py +++ b/sickgear/indexermapper.py @@ -408,7 +408,7 @@ def load_mapped_ids(**kwargs): cur_show_obj.ids = sickgear.indexermapper.map_indexers_to_show(cur_show_obj, **n_kargs) except (BaseException, Exception): logger.debug('Error loading mapped id\'s for show: %s' % cur_show_obj.unique_name) - logger.log('Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error('Traceback: %s' % traceback.format_exc()) logger.log('TV info 
mappings loaded') diff --git a/sickgear/logger.py b/sickgear/logger.py index 2e479737..702fdb8f 100644 --- a/sickgear/logger.py +++ b/sickgear/logger.py @@ -51,7 +51,7 @@ MESSAGE = logging.INFO DEBUG = logging.DEBUG DB = 5 -reverseNames = {u'ERROR': ERROR, u'WARNING': WARNING, u'INFO': MESSAGE, u'DEBUG': DEBUG, u'DB': DB} +reverseNames = {'ERROR': ERROR, 'WARNING': WARNING, 'INFO': MESSAGE, 'DEBUG': DEBUG, 'DB': DB} # suppress output with this handler diff --git a/sickgear/metadata/generic.py b/sickgear/metadata/generic.py index 62b122dc..00c971ef 100644 --- a/sickgear/metadata/generic.py +++ b/sickgear/metadata/generic.py @@ -150,31 +150,31 @@ class GenericMetadata(object): def _has_show_metadata(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_show_file_path(show_obj)) - logger.log(u"Checking if " + self.get_show_file_path(show_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_show_file_path(show_obj)} exists: {result}') return result def has_episode_metadata(self, ep_obj): # type: (sickgear.tv.TVEpisode) -> bool result = os.path.isfile(self.get_episode_file_path(ep_obj)) - logger.log(u"Checking if " + self.get_episode_file_path(ep_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_episode_file_path(ep_obj)} exists: {result}') return result def _has_fanart(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_fanart_path(show_obj)) - logger.log(u"Checking if " + self.get_fanart_path(show_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_fanart_path(show_obj)} exists: {result}') return result def _has_poster(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_poster_path(show_obj)) - logger.log(u"Checking if " + self.get_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_poster_path(show_obj)} exists: {result}') return result def _has_banner(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_banner_path(show_obj)) - logger.log(u"Checking if " + self.get_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_banner_path(show_obj)} exists: {result}') return result def has_episode_thumb(self, ep_obj): @@ -182,7 +182,7 @@ class GenericMetadata(object): location = self.get_episode_thumb_path(ep_obj) result = None is not location and os.path.isfile(location) if location: - logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {location} exists: {result}') return result def _has_season_poster(self, show_obj, season): @@ -190,7 +190,7 @@ class GenericMetadata(object): location = self.get_season_poster_path(show_obj, season) result = None is not location and os.path.isfile(location) if location: - logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {location} exists: {result}') return result def _has_season_banner(self, show_obj, season): @@ -198,21 +198,19 @@ class GenericMetadata(object): location = self.get_season_banner_path(show_obj, season) result = None is not location and os.path.isfile(location) if location: - logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {location} exists: {result}') return result def _has_season_all_poster(self, show_obj): # type: 
(sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_season_all_poster_path(show_obj)) - logger.log(u"Checking if " + self.get_season_all_poster_path(show_obj) + " exists: " + str(result), - logger.DEBUG) + logger.debug(f'Checking if {self.get_season_all_poster_path(show_obj)} exists: {result}') return result def _has_season_all_banner(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_season_all_banner_path(show_obj)) - logger.log(u"Checking if " + self.get_season_all_banner_path(show_obj) + " exists: " + str(result), - logger.DEBUG) + logger.debug(f'Checking if {self.get_season_all_banner_path(show_obj)} exists: {result}') return result @staticmethod @@ -343,8 +341,7 @@ class GenericMetadata(object): isinstance(getattr(fetched_show_info, 'data', None), (list, dict)) and 'seriesname' in getattr(fetched_show_info, 'data', [])) and \ not hasattr(fetched_show_info, 'seriesname'): - logger.log(u'Show %s not found on %s ' % - (show_obj.name, sickgear.TVInfoAPI(show_obj.tvid).name), logger.WARNING) + logger.warning(f'Show {show_obj.name} not found on {sickgear.TVInfoAPI(show_obj.tvid).name} ') return False return True @@ -364,8 +361,8 @@ class GenericMetadata(object): try: result = self.write_show_file(show_obj) except BaseTVinfoError as e: - logger.log('Unable to find useful show metadata for %s on %s: %s' % ( - self.name, sickgear.TVInfoAPI(show_obj.tvid).name, ex(e)), logger.WARNING) + logger.warning(f'Unable to find useful show metadata for {self.name}' + f' on {sickgear.TVInfoAPI(show_obj.tvid).name}: {ex(e)}') return result @@ -373,21 +370,20 @@ class GenericMetadata(object): # type: (sickgear.tv.TVEpisode, bool) -> bool result = False if self.episode_metadata and ep_obj and (not self.has_episode_metadata(ep_obj) or force): - logger.log('Metadata provider %s creating episode metadata for %s' % (self.name, ep_obj.pretty_name()), - logger.DEBUG) + logger.debug('Metadata provider %s creating episode metadata for %s' % (self.name, ep_obj.pretty_name())) try: result = self.write_ep_file(ep_obj) except BaseTVinfoError as e: - logger.log('Unable to find useful episode metadata for %s on %s: %s' % ( - self.name, sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.WARNING) + logger.warning(f'Unable to find useful episode metadata for {self.name}' + f' on {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}: {ex(e)}') return result def update_show_indexer_metadata(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.show_metadata and show_obj and self._has_show_metadata(show_obj): - logger.debug(u'Metadata provider %s updating show indexer metadata file for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name}' + f' updating show indexer metadata file for {show_obj.unique_name}') nfo_file_path = self.get_show_file_path(show_obj) with io.open(nfo_file_path, 'r', encoding='utf8') as xmlFileObj: @@ -419,29 +415,28 @@ class GenericMetadata(object): def create_fanart(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.fanart and show_obj and not self._has_fanart(show_obj): - logger.debug(u'Metadata provider %s creating fanart for %s' % (self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating fanart for {show_obj.unique_name}') return self.save_fanart(show_obj) return False def create_poster(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.poster and show_obj and not self._has_poster(show_obj): - logger.debug(u'Metadata provider %s creating poster for 
%s' % (self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating poster for {show_obj.unique_name}') return self.save_poster(show_obj) return False def create_banner(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.banner and show_obj and not self._has_banner(show_obj): - logger.debug(u'Metadata provider %s creating banner for %s' % (self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating banner for {show_obj.unique_name}') return self.save_banner(show_obj) return False def create_episode_thumb(self, ep_obj): # type: (sickgear.tv.TVEpisode) -> bool if self.episode_thumbnails and ep_obj and not self.has_episode_thumb(ep_obj): - logger.log(u"Metadata provider " + self.name + " creating episode thumbnail for " + ep_obj.pretty_name(), - logger.DEBUG) + logger.debug(f'Metadata provider {self.name} creating episode thumbnail for {ep_obj.pretty_name()}') return self.save_thumbnail(ep_obj) return False @@ -451,8 +446,7 @@ class GenericMetadata(object): result = [] for season, _ in iteritems(show_obj.sxe_ep_obj): if not self._has_season_poster(show_obj, season): - logger.debug(u'Metadata provider %s creating season posters for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating season posters for {show_obj.unique_name}') result = result + [self.save_season_posters(show_obj, season)] return all(result) return False @@ -463,8 +457,7 @@ class GenericMetadata(object): result = [] for season, _ in iteritems(show_obj.sxe_ep_obj): if not self._has_season_banner(show_obj, season): - logger.debug(u'Metadata provider %s creating season banners for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating season banners for {show_obj.unique_name}') result = result + [self.save_season_banners(show_obj, season)] return all(result) return False @@ -472,16 +465,14 @@ class GenericMetadata(object): def create_season_all_poster(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.season_all_poster and show_obj and not self._has_season_all_poster(show_obj): - logger.debug(u'Metadata provider %s creating season all posters for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating season all posters for {show_obj.unique_name}') return self.save_season_all_poster(show_obj) return False def create_season_all_banner(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.season_all_banner and show_obj and not self._has_season_all_banner(show_obj): - logger.debug(u'Metadata provider %s creating season all banner for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating season all banner for {show_obj.unique_name}') return self.save_season_all_banner(show_obj) return False @@ -557,7 +548,7 @@ class GenericMetadata(object): nfo_file_path = self.get_show_file_path(show_obj) - logger.log(u'Writing show metadata file: %s' % nfo_file_path, logger.DEBUG) + logger.debug(f'Writing show metadata file: {nfo_file_path}') return sg_helpers.write_file(nfo_file_path, data, xmltree=True, utf8=True) @@ -586,7 +577,7 @@ class GenericMetadata(object): nfo_file_path = self.get_episode_file_path(ep_obj) - logger.log(u'Writing episode metadata file: %s' % nfo_file_path, logger.DEBUG) + logger.debug(f'Writing episode metadata file: {nfo_file_path}') return sg_helpers.write_file(nfo_file_path, data, xmltree=True, utf8=True) @@ -603,14 +594,14 @@ class 
GenericMetadata(object): file_path = self.get_episode_thumb_path(ep_obj) if not file_path: - logger.log(u"Unable to find a file path to use for this thumbnail, not generating it", logger.DEBUG) + logger.debug('Unable to find a file path to use for this thumbnail, not generating it') return False thumb_url = self._get_episode_thumb_url(ep_obj) # if we can't find one then give up if not thumb_url: - logger.log(u"No thumb is available for this episode, not creating a thumb", logger.DEBUG) + logger.debug('No thumb is available for this episode, not creating a thumb') return False thumb_data = metadata_helpers.get_show_image(thumb_url, show_name=ep_obj.show_obj.name) @@ -641,7 +632,7 @@ class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.FANART) if not fanart_data: - logger.log(u"No fanart image was retrieved, unable to write fanart", logger.DEBUG) + logger.debug('No fanart image was retrieved, unable to write fanart') return False return self._write_image(fanart_data, fanart_path) @@ -662,7 +653,7 @@ class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.POSTER) if not poster_data: - logger.log(u"No show poster image was retrieved, unable to write poster", logger.DEBUG) + logger.debug('No show poster image was retrieved, unable to write poster') return False return self._write_image(poster_data, poster_path) @@ -683,7 +674,7 @@ class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.BANNER) if not banner_data: - logger.log(u"No show banner image was retrieved, unable to write banner", logger.DEBUG) + logger.debug('No show banner image was retrieved, unable to write banner') return False return self._write_image(banner_data, banner_path) @@ -717,14 +708,13 @@ class GenericMetadata(object): season_poster_file_path = self.get_season_poster_path(show_obj, cur_season) if not season_poster_file_path: - logger.log(u'Path for season ' + str(cur_season) + ' came back blank, skipping this season', - logger.DEBUG) + logger.debug(f'Path for season {cur_season} came back blank, skipping this season') continue season_data = metadata_helpers.get_show_image(season_url, show_name=show_obj.name) if not season_data: - logger.log(u'No season poster data available, skipping this season', logger.DEBUG) + logger.debug('No season poster data available, skipping this season') continue result = result + [self._write_image(season_data, season_poster_file_path)] @@ -762,14 +752,13 @@ class GenericMetadata(object): season_banner_file_path = self.get_season_banner_path(show_obj, cur_season) if not season_banner_file_path: - logger.log(u'Path for season ' + str(cur_season) + ' came back blank, skipping this season', - logger.DEBUG) + logger.debug(f'Path for season {cur_season} came back blank, skipping this season') continue season_data = metadata_helpers.get_show_image(season_url, show_name=show_obj.name) if not season_data: - logger.log(u'No season banner data available, skipping this season', logger.DEBUG) + logger.debug('No season banner data available, skipping this season') continue result = result + [self._write_image(season_data, season_banner_file_path)] @@ -787,7 +776,7 @@ class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.POSTER) if not poster_data: - logger.log(u"No show poster image was retrieved, unable to write season all poster", logger.DEBUG) + logger.debug('No show poster image was retrieved, unable to write season all poster') return False return self._write_image(poster_data, poster_path) @@ -801,7 +790,7 @@ 
class GenericMetadata(object):
                                                  img_cache_type=sickgear.image_cache.ImageCache.BANNER)
 
         if not banner_data:
-            logger.log(u"No show banner image was retrieved, unable to write season all banner", logger.DEBUG)
+            logger.debug('No show banner image was retrieved, unable to write season all banner')
             return False
 
         return self._write_image(banner_data, banner_path)
@@ -819,18 +808,18 @@ class GenericMetadata(object):
 
         # don't bother overwriting it
         if not force and os.path.isfile(image_path):
-            logger.log(u"Image already exists, not downloading", logger.DEBUG)
+            logger.debug('Image already exists, not downloading')
             return False
 
         if not image_data:
-            logger.log(u"Unable to retrieve image, skipping", logger.WARNING)
+            logger.warning('Unable to retrieve image, skipping')
             return False
 
         image_dir = os.path.dirname(image_path)
 
         try:
             if not os.path.isdir(image_dir):
-                logger.log(u"Metadata dir didn't exist, creating it at " + image_dir, logger.DEBUG)
+                logger.debug(f'Metadata dir didn\'t exist, creating it at {image_dir}')
                 os.makedirs(image_dir)
                 sg_helpers.chmod_as_parent(image_dir)
 
@@ -839,9 +828,7 @@ class GenericMetadata(object):
                 out_file.close()
                 sg_helpers.chmod_as_parent(image_path)
         except IOError as e:
-            logger.log(
-                u"Unable to write image to " + image_path + " - are you sure the show folder is writable? " + ex(e),
-                logger.ERROR)
+            logger.error(f'Unable to write image to {image_path} - are you sure the show folder is writable? {ex(e)}')
             return False
 
         return True
@@ -869,8 +856,8 @@ class GenericMetadata(object):
                 return t.get_show((show_obj.ids[tv_id]['id'], show_obj.prodid)[tv_src == show_obj.tvid],
                                   load_episodes=False, banners=True, posters=True, fanart=True, language=show_obj.lang)
             except (BaseTVinfoError, IOError) as e:
-                logger.log(u"Unable to look up show on " + sickgear.TVInfoAPI(
-                    tv_id).name + ", not downloading images: " + ex(e), logger.WARNING)
+                logger.warning(f'Unable to look up show on {sickgear.TVInfoAPI(tv_id).name},'
+                               f' not downloading images: {ex(e)}')
 
         # todo: when tmdb is added as tv source remove the hardcoded TVINFO_TMDB
         for tv_src in list(OrderedDict.fromkeys([show_obj.tvid] + list(sickgear.TVInfoAPI().search_sources) +
@@ -1042,8 +1029,8 @@ class GenericMetadata(object):
             image_type = 'fanart'
 
         if image_type not in ('poster', 'banner', 'fanart', 'poster_thumb', 'banner_thumb'):
-            logger.log(u"Invalid image type " + str(image_type) + ", couldn't find it in the " + sickgear.TVInfoAPI(
-                show_obj.tvid).name + " object", logger.ERROR)
+            logger.error(f'Invalid image type {image_type}, couldn\'t find it in the'
+                         f' {sickgear.TVInfoAPI(show_obj.tvid).name} object')
             return
 
         image_urls = self._retrieve_image_urls(show_obj, image_type, show_infos)
@@ -1094,8 +1081,8 @@ class GenericMetadata(object):
             t = sickgear.TVInfoAPI(show_obj.tvid).setup(**tvinfo_config)
             tvinfo_obj_show = t[show_obj.prodid]
         except (BaseTVinfoError, IOError) as e:
-            logger.log(u'Unable to look up show on ' + sickgear.TVInfoAPI(
-                show_obj.tvid).name + ', not downloading images: ' + ex(e), logger.WARNING)
+            logger.warning(f'Unable to look up show on {sickgear.TVInfoAPI(show_obj.tvid).name},'
+                           f' not downloading images: {ex(e)}')
             return result
 
         if not self._valid_show(tvinfo_obj_show, show_obj):
@@ -1124,10 +1111,10 @@ class GenericMetadata(object):
         metadata_path = os.path.join(folder, self._show_metadata_filename)
 
         if not os.path.isdir(folder) or not os.path.isfile(metadata_path):
-            logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG)
+            logger.debug(f'Can\'t load the 
metadata file from {repr(metadata_path)}, it doesn\'t exist')
             return empty_return
 
-        logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG)
+        logger.debug(f'Loading show info from metadata file in {folder}')
 
         try:
             with io.open(metadata_path, 'r', encoding='utf8') as xmlFileObj:
@@ -1138,11 +1125,9 @@ class GenericMetadata(object):
                     show_xml.findtext('tvdbid'),
                     show_xml.findtext('id'),
                     show_xml.findtext('indexer'))):
-                logger.log(u"Invalid info in tvshow.nfo (missing name or id):"
-                           + str(show_xml.findtext('title')) + ' '
-                           + str(show_xml.findtext('indexer')) + ' '
-                           + str(show_xml.findtext('tvdbid')) + ' '
-                           + str(show_xml.findtext('id')))
+                logger.log(f'Invalid info in tvshow.nfo (missing name or id):'
+                           f'{show_xml.findtext("title")} {show_xml.findtext("indexer")} '
+                           f'{show_xml.findtext("tvdbid")} {show_xml.findtext("id")}')
                 return empty_return
 
             name = show_xml.findtext('title')
@@ -1178,17 +1163,15 @@ class GenericMetadata(object):
                     except (BaseException, Exception):
                         pass
             else:
-                logger.log(u"Empty or field in NFO, unable to find a ID", logger.WARNING)
+                logger.warning('Empty or field in NFO, unable to find an ID')
                 return empty_return
 
             if None is prodid:
-                logger.log(u"Invalid Show ID (%s), not using metadata file" % prodid, logger.WARNING)
+                logger.warning(f'Invalid Show ID ({prodid}), not using metadata file')
                 return empty_return
 
         except (BaseException, Exception) as e:
-            logger.log(
-                u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e),
-                logger.WARNING)
+            logger.warning(f'There was an error parsing your existing metadata file: "{metadata_path}" error: {ex(e)}')
             return empty_return
 
         return tvid, prodid, name
@@ -1202,7 +1185,7 @@ class GenericMetadata(object):
             except (BaseException, Exception):
                 pass
 
-        logger.log(u'Could not find any %s images on Fanart.tv for %s' % (image_type, show_obj.name), logger.DEBUG)
+        logger.debug(f'Could not find any {image_type} images on Fanart.tv for {show_obj.name}')
 
     @staticmethod
     def _fanart_urls(tvdb_id, image_type='banner', lang='en', thumb=False):
diff --git a/sickgear/metadata/helpers.py b/sickgear/metadata/helpers.py
index f0f5254a..0aac20c8 100644
--- a/sickgear/metadata/helpers.py
+++ b/sickgear/metadata/helpers.py
@@ -42,7 +42,7 @@ def get_show_image(url, img_num=None, show_name=None, supress_log=False):
 
     # if they provided a fanart number try to use it instead
     temp_url = url if None is img_num else url.split('-')[0] + '-' + str(img_num) + '.jpg'
 
-    logger.log(u'Fetching image from ' + temp_url, logger.DEBUG)
+    logger.debug(f'Fetching image from {temp_url}')
 
     from sickgear import FLARESOLVERR_HOST, MEMCACHE
     MEMCACHE.setdefault('cookies', {})
@@ -51,8 +51,8 @@ def get_show_image(url, img_num=None, show_name=None, supress_log=False):
     if None is image_data:
         if supress_log:
             return
-        logger.log('There was an error trying to retrieve the image%s, aborting' %
-                   ('', ' for show: %s' % show_name)[None is not show_name], logger.WARNING)
+        logger.warning(f'There was an error trying to retrieve the image'
+                       f'{("", " for show: %s" % show_name)[None is not show_name]}, aborting')
         return
 
     return image_data
diff --git a/sickgear/metadata/kodi.py b/sickgear/metadata/kodi.py
index 9723d940..5b836ad4 100644
--- a/sickgear/metadata/kodi.py
+++ b/sickgear/metadata/kodi.py
@@ -127,13 +127,11 @@ class KODIMetadata(generic.GenericMetadata):
 
         try:
             show_info = t[int(show_id)]
         except BaseTVinfoShownotfound as e:
-            logger.log('Unable to find show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI(
-            
show_obj.tvid).name), logger.ERROR) + logger.error(f'Unable to find show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' + f' skipping it') raise e except BaseTVinfoError as e: - logger.log( - '%s is down, can\'t use its data to add this show' % sickgear.TVInfoAPI(show_obj.tvid).name, - logger.ERROR) + logger.error(f'{sickgear.TVInfoAPI(show_obj.tvid).name} is down, can\'t use its data to add this show') raise e if not self._valid_show(show_info, show_obj): @@ -141,8 +139,8 @@ class KODIMetadata(generic.GenericMetadata): # check for title and id if None is getattr(show_info, 'seriesname', None) or None is getattr(show_info, 'id', None): - logger.log('Incomplete info for show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI( - show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' + f' skipping it') return False title = etree.SubElement(tv_node, 'title') @@ -171,8 +169,8 @@ class KODIMetadata(generic.GenericMetadata): uniqueid = etree.SubElement(tv_node, 'uniqueid', **kwargs) uniqueid.text = '%s%s' % (('', 'tt')[TVINFO_IMDB == tvid], mid) if not has_id: - logger.log('Incomplete info for show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI( - show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' + f' skipping it') return False ratings = etree.SubElement(tv_node, 'ratings') @@ -235,7 +233,7 @@ class KODIMetadata(generic.GenericMetadata): nfo_file_path = self.get_show_file_path(show_obj) - logger.log(u'Writing Kodi metadata file: %s' % nfo_file_path, logger.DEBUG) + logger.debug(f'Writing Kodi metadata file: {nfo_file_path}') data = '\n%s' % data return sg_helpers.write_file(nfo_file_path, data, utf8=True) @@ -261,7 +259,7 @@ class KODIMetadata(generic.GenericMetadata): nfo_file_path = self.get_episode_file_path(ep_obj) - logger.log(u'Writing episode metadata file: %s' % nfo_file_path, logger.DEBUG) + logger.debug(f'Writing episode metadata file: {nfo_file_path}') return sg_helpers.write_file(nfo_file_path, data, xmltree=True, xml_header=True, utf8=True) @@ -292,8 +290,8 @@ class KODIMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log('Unable to connect to %s while creating meta files - skipping - %s' % (sickgear.TVInfoAPI( - ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' + f' while creating meta files - skipping - {ex(e)}') return if not self._valid_show(show_info, ep_obj.show_obj): @@ -318,10 +316,10 @@ class KODIMetadata(generic.GenericMetadata): ep_info['firstaired'] = str(datetime.date.fromordinal(1)) if None is getattr(ep_info, 'episodename', None): - logger.log(u'Not generating nfo because the episode has no title', logger.DEBUG) + logger.debug('Not generating nfo because the episode has no title') return None - logger.log('Creating metadata for episode %sx%s' % (ep_obj.season, ep_obj.episode), logger.DEBUG) + logger.debug('Creating metadata for episode %sx%s' % (ep_obj.season, ep_obj.episode)) if 1 < len(ep_obj_list_to_write): ep_node = etree.SubElement(root_node, 'episodedetails') diff --git a/sickgear/metadata/mede8er.py b/sickgear/metadata/mede8er.py index 3b9759d1..b9f996f5 100644 --- a/sickgear/metadata/mede8er.py +++ b/sickgear/metadata/mede8er.py @@ -127,10 +127,10 
@@ class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): try: show_info = t[int(show_obj.prodid)] except BaseTVinfoShownotfound as e: - logger.log(u'Unable to find show with id ' + str(show_obj.prodid) + ' on tvdb, skipping it', logger.ERROR) + logger.error(f'Unable to find show with id {show_obj.prodid} on tvdb, skipping it') raise e except BaseTVinfoError as e: - logger.log(u'TVDB is down, can\'t use its data to make the NFO', logger.ERROR) + logger.error(f'TVDB is down, can\'t use its data to make the NFO') raise e if not self._valid_show(show_info, show_obj): @@ -142,12 +142,12 @@ class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): or '' == show_info['seriesname'] \ or None is show_info['id'] \ or '' == show_info['id']: - logger.log('Incomplete info for show with id %s on %s, skipping it' % - (show_obj.prodid, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_obj.prodid}' + f' on {sickgear.TVInfoAPI(show_obj.tvid).name}, skipping it') return False except BaseTVinfoAttributenotfound: - logger.log('Incomplete info for show with id %s on %s, skipping it' % - (show_obj.prodid, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_obj.prodid}' + f' on {sickgear.TVInfoAPI(show_obj.tvid).name}, skipping it') return False SeriesName = etree.SubElement(tv_node, 'title') @@ -241,8 +241,8 @@ class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log('Unable to connect to %s while creating meta files - skipping - %s' % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' + f' while creating meta files - skipping - {ex(e)}') return False if not self._valid_show(show_info, ep_obj.show_obj): @@ -261,8 +261,8 @@ class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): try: ep_info = show_info[cur_ep_obj.season][cur_ep_obj.episode] except (BaseException, Exception): - logger.log(u'Unable to find episode %sx%s on tvdb... has it been removed? Should I delete from db?' % - (cur_ep_obj.season, cur_ep_obj.episode)) + logger.log(f'Unable to find episode {cur_ep_obj.season}x{cur_ep_obj.episode} on tvdb...' + f' has it been removed? 
Should it be deleted from the db?') return None if cur_ep_obj == ep_obj: diff --git a/sickgear/metadata/mediabrowser.py b/sickgear/metadata/mediabrowser.py index d3a2947a..f8c1e8d3 100644 --- a/sickgear/metadata/mediabrowser.py +++ b/sickgear/metadata/mediabrowser.py @@ -123,7 +123,7 @@ class MediaBrowserMetadata(generic.GenericMetadata): metadata_dir_name = os.path.join(os.path.dirname(ep_obj.location), 'metadata') xml_file_path = os.path.join(metadata_dir_name, xml_file_name) else: - logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG) + logger.debug(f'Episode location doesn\'t exist: {ep_obj.location}') return '' return xml_file_path @@ -175,10 +175,10 @@ class MediaBrowserMetadata(generic.GenericMetadata): break if not season_dir: - logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Unable to find a season dir for season {season}') return None - logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Using {season_dir}/folder.jpg as season dir for season {season}') return os.path.join(show_obj.location, season_dir, 'folder.jpg') @@ -215,10 +215,10 @@ class MediaBrowserMetadata(generic.GenericMetadata): break if not season_dir: - logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Unable to find a season dir for season {season}') return None - logger.log(u"Using " + str(season_dir) + "/banner.jpg as season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Using {season_dir}/banner.jpg as season dir for season {season}') return os.path.join(show_obj.location, season_dir, 'banner.jpg') @@ -252,12 +252,11 @@ class MediaBrowserMetadata(generic.GenericMetadata): try: show_info = t[int(show_obj.prodid)] except BaseTVinfoShownotfound as e: - logger.log("Unable to find show with id %s on %s, skipping it" % - (show_obj.prodid, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Unable to find show with id {show_obj.prodid} ' + f'on {sickgear.TVInfoAPI(show_obj.tvid).name}, skipping it') raise e except BaseTVinfoError as e: - logger.log("%s is down, can't use its data to make the NFO" % sickgear.TVInfoAPI(show_obj.tvid).name, - logger.ERROR) + logger.error('%s is down, can\'t use its data to make the NFO' % sickgear.TVInfoAPI(show_obj.tvid).name) raise e if not self._valid_show(show_info, show_obj): @@ -265,8 +264,8 @@ class MediaBrowserMetadata(generic.GenericMetadata): # check for title and id if None is getattr(show_info, 'seriesname', None) or None is getattr(show_info, 'id', None): - logger.log("Incomplete info for show with id %s on %s, skipping it" % - (show_obj.prodid, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_obj.prodid}' + f' on {sickgear.TVInfoAPI(show_obj.tvid).name}, skipping it') return False prodid = etree.SubElement(tv_node, "id") @@ -415,8 +414,8 @@ class MediaBrowserMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log("Unable to connect to %s while creating meta files - skipping - %s" % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' + f' while creating meta files - skipping - {ex(e)}') return False if not self._valid_show(show_info, ep_obj.show_obj): diff --git 
a/sickgear/metadata/tivo.py b/sickgear/metadata/tivo.py index eced781d..b3a040d3 100644 --- a/sickgear/metadata/tivo.py +++ b/sickgear/metadata/tivo.py @@ -158,7 +158,7 @@ class TIVOMetadata(generic.GenericMetadata): metadata_dir_name = os.path.join(os.path.dirname(ep_obj.location), '.meta') metadata_file_path = os.path.join(metadata_dir_name, metadata_file_name) else: - logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG) + logger.debug(f'Episode location doesn\'t exist: {ep_obj.location}') return '' return metadata_file_path @@ -203,8 +203,8 @@ class TIVOMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log("Unable to connect to %s while creating meta files - skipping - %s" % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' + f' while creating meta files - skipping - {ex(e)}') return False if not self._valid_show(show_info, ep_obj.show_obj): @@ -251,10 +251,10 @@ class TIVOMetadata(generic.GenericMetadata): # Write the synopsis of the video here sanitizedDescription = cur_ep_obj.description # Replace double curly quotes - sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"") + sanitizedDescription = sanitizedDescription.replace('\u201c', '"').replace('\u201d', '"') # Replace single curly quotes - sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace( - u"\u02BC", "'") + sanitizedDescription = sanitizedDescription.replace('\u2018', '\'').replace('\u2019', '\'').replace( + '\u02BC', '\'') data += ("description : " + sanitizedDescription + "\n") @@ -337,11 +337,11 @@ class TIVOMetadata(generic.GenericMetadata): try: if not os.path.isdir(nfo_file_dir): - logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG) + logger.debug(f'Metadata dir didn\'t exist, creating it at {nfo_file_dir}') os.makedirs(nfo_file_dir) sg_helpers.chmod_as_parent(nfo_file_dir) - logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG) + logger.debug(f'Writing episode nfo file to {nfo_file_path}') with open(nfo_file_path, 'w') as nfo_file: # Calling encode directly, b/c often descriptions have wonky characters. @@ -350,8 +350,7 @@ class TIVOMetadata(generic.GenericMetadata): sg_helpers.chmod_as_parent(nfo_file_path) except EnvironmentError as e: - logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), - logger.ERROR) + logger.error(f'Unable to write file to {nfo_file_path} - are you sure the folder is writable? 
{ex(e)}') return False return True diff --git a/sickgear/metadata/wdtv.py b/sickgear/metadata/wdtv.py index 0864e43d..9dda6a5d 100644 --- a/sickgear/metadata/wdtv.py +++ b/sickgear/metadata/wdtv.py @@ -168,10 +168,10 @@ class WDTVMetadata(generic.GenericMetadata): break if not season_dir: - logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Unable to find a season dir for season {season}') return None - logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Using {season_dir}/folder.jpg as season dir for season {season}') return os.path.join(show_obj.location, season_dir, 'folder.jpg') @@ -204,8 +204,8 @@ class WDTVMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log("Unable to connect to %s while creating meta files - skipping - %s" % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' + f' while creating meta files - skipping - {ex(e)}') return False if not self._valid_show(show_info, ep_obj.show_obj): diff --git a/sickgear/metadata/xbmc_12plus.py b/sickgear/metadata/xbmc_12plus.py index f0b709a4..57b0f2de 100644 --- a/sickgear/metadata/xbmc_12plus.py +++ b/sickgear/metadata/xbmc_12plus.py @@ -123,12 +123,11 @@ class XBMC12PlusMetadata(generic.GenericMetadata): try: show_info = t[int(show_id)] except BaseTVinfoShownotfound as e: - logger.log('Unable to find show with id %s on %s, skipping it' % - (show_id, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Unable to find show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' + f' skipping it') raise e except BaseTVinfoError as e: - logger.log('%s is down, can\'t use its data to add this show' % sickgear.TVInfoAPI(show_obj.tvid).name, - logger.ERROR) + logger.error('%s is down, can\'t use its data to add this show' % sickgear.TVInfoAPI(show_obj.tvid).name) raise e if not self._valid_show(show_info, show_obj): @@ -136,8 +135,8 @@ class XBMC12PlusMetadata(generic.GenericMetadata): # check for title and id if None is getattr(show_info, 'seriesname', None) or None is getattr(show_info, 'id', None): - logger.log('Incomplete info for show with id %s on %s, skipping it' % - (show_id, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' + f' skipping it') return False title = etree.SubElement(tv_node, 'title') @@ -227,8 +226,9 @@ class XBMC12PlusMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log('Unable to connect to %s while creating meta files - skipping - %s' % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error( + f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name} while creating meta files' + f' - skipping - {ex(e)}') return if not self._valid_show(show_info, ep_obj.show_obj): @@ -249,17 +249,17 @@ class XBMC12PlusMetadata(generic.GenericMetadata): (cur_ep_obj.season, cur_ep_obj.episode, sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name)) return None except (BaseException, Exception): - logger.log(u'Not generating nfo because failed to fetched tv info data at this time', logger.DEBUG) + logger.debug('Not 
generating nfo because failed to fetched tv info data at this time') return None if None is getattr(ep_info, 'firstaired', None): ep_info['firstaired'] = str(datetime.date.fromordinal(1)) if None is getattr(ep_info, 'episodename', None): - logger.log(u'Not generating nfo because the ep has no title', logger.DEBUG) + logger.debug('Not generating nfo because the ep has no title') return None - logger.log(u'Creating metadata for episode ' + str(ep_obj.season) + 'x' + str(ep_obj.episode), logger.DEBUG) + logger.debug(f'Creating metadata for episode {ep_obj.season}x{ep_obj.episode}') if 1 < len(ep_obj_list_to_write): episode = etree.SubElement(rootNode, 'episodedetails') diff --git a/sickgear/name_parser/parser.py b/sickgear/name_parser/parser.py index c1769f1b..46cf2fae 100644 --- a/sickgear/name_parser/parser.py +++ b/sickgear/name_parser/parser.py @@ -98,7 +98,7 @@ class NameParser(object): cur_pattern = strip_comment.sub('', cur_pattern) cur_regex = re.compile('(?x)' + cur_pattern, re.VERBOSE | re.IGNORECASE) except re.error as errormsg: - logger.log(u'WARNING: Invalid episode_pattern, %s. %s' % (errormsg, cur_pattern)) + logger.log(f'WARNING: Invalid episode_pattern, {errormsg}. {cur_pattern}') else: cls.compiled_regexes[index].append([cur_pattern_num, cur_pattern_name, cur_regex]) index += 1 @@ -380,12 +380,11 @@ class NameParser(object): season_number = int(ep_obj['seasonnumber']) episode_numbers = [int(ep_obj['episodenumber'])] except BaseTVinfoEpisodenotfound: - logger.warning(u'Unable to find episode with date %s for show %s, skipping' % - (best_result.air_date, show_obj.unique_name)) + logger.warning(f'Unable to find episode with date {best_result.air_date}' + f' for show {show_obj.unique_name}, skipping') episode_numbers = [] except BaseTVinfoError as e: - logger.log(u'Unable to contact ' + sickgear.TVInfoAPI(show_obj.tvid).name - + ': ' + ex(e), logger.WARNING) + logger.warning(f'Unable to contact {sickgear.TVInfoAPI(show_obj.tvid).name}: {ex(e)}') episode_numbers = [] for epNo in episode_numbers: @@ -468,9 +467,8 @@ class NameParser(object): best_result.season_number = new_season_numbers[0] if self.convert and show_obj.is_scene: - logger.log(u'Converted parsed result %s into %s' - % (best_result.original_name, decode_str(str(best_result), errors='xmlcharrefreplace')), - logger.DEBUG) + logger.debug(f'Converted parsed result {best_result.original_name}' + f' into {decode_str(best_result, errors="xmlcharrefreplace")}') helpers.cpu_sleep() @@ -646,7 +644,7 @@ class NameParser(object): and any('anime' in wr for wr in final_result.which_regex) == bool(final_result.show_obj.is_anime): name_parser_cache.add(name, final_result) - logger.log(u'Parsed %s into %s' % (name, final_result), logger.DEBUG) + logger.debug(f'Parsed {name} into {final_result}') return final_result @@ -752,9 +750,9 @@ class ParseResult(LegacyParseResult): def __unicode__(self): if None is not self.series_name: - to_return = self.series_name + u' - ' + to_return = f'{self.series_name} - ' else: - to_return = u'' + to_return = '' if None is not self.season_number: to_return += 'S' + str(self.season_number) if self.episode_numbers and len(self.episode_numbers): @@ -863,7 +861,7 @@ class NameParserCache(object): key = self._previous_parsed.first_key() del self._previous_parsed[key] except KeyError: - logger.log('Could not remove old NameParserCache entry: %s' % key, logger.DEBUG) + logger.debug('Could not remove old NameParserCache entry: %s' % key) def get(self, name): # type: (AnyStr) -> ParseResult @@ -876,7 +874,7 @@ 
class NameParserCache(object): """ with self.lock: if name in self._previous_parsed: - logger.log('Using cached parse result for: ' + name, logger.DEBUG) + logger.debug('Using cached parse result for: ' + name) self._previous_parsed.move_to_end(name) return self._previous_parsed[name] diff --git a/sickgear/naming.py b/sickgear/naming.py index 6d34d227..3d2378b2 100644 --- a/sickgear/naming.py +++ b/sickgear/naming.py @@ -165,11 +165,11 @@ def check_valid_naming(pattern=None, multi=None, anime_type=None): if None is anime_type: anime_type = sickgear.NAMING_ANIME - logger.log(u'Checking whether the pattern %s is valid for a single episode' % pattern, logger.DEBUG) + logger.debug(f'Checking whether the pattern {pattern} is valid for a single episode') valid = validate_name(pattern, None, anime_type) if None is not multi: - logger.log(u'Checking whether the pattern %s is valid for a multi episode' % pattern, logger.DEBUG) + logger.debug(f'Checking whether the pattern {pattern} is valid for a multi episode') valid = valid and validate_name(pattern, multi, anime_type) return valid @@ -188,7 +188,7 @@ def check_valid_abd_naming(pattern=None): if None is pattern: pattern = sickgear.NAMING_PATTERN - logger.log(u'Checking whether the pattern %s is valid for an air-by-date episode' % pattern, logger.DEBUG) + logger.debug(f'Checking whether the pattern {pattern} is valid for an air-by-date episode') valid = validate_name(pattern, abd=True) return valid @@ -207,7 +207,7 @@ def check_valid_sports_naming(pattern=None): if None is pattern: pattern = sickgear.NAMING_PATTERN - logger.log(u'Checking whether the pattern %s is valid for an sports episode' % pattern, logger.DEBUG) + logger.debug(f'Checking whether the pattern {pattern} is valid for an sports episode') valid = validate_name(pattern, sports=True) return valid @@ -233,43 +233,43 @@ def validate_name(pattern, multi=None, anime_type=None, file_only=False, abd=Fal """ sample_ep_obj = generate_sample_ep(multi, abd, sports, anime_type=anime_type) - new_name = u'%s.ext' % sample_ep_obj.formatted_filename(pattern, multi, anime_type) + new_name = f'{sample_ep_obj.formatted_filename(pattern, multi, anime_type)}.ext' new_path = sample_ep_obj.formatted_dir(pattern, multi) if not file_only: new_name = os.path.join(new_path, new_name) if not new_name: - logger.log(u'Unable to create a name out of %s' % pattern, logger.DEBUG) + logger.debug(f'Unable to create a name out of {pattern}') return False - logger.log(u'Trying to parse %s' % new_name, logger.DEBUG) + logger.debug(f'Trying to parse {new_name}') parser = NameParser(True, show_obj=sample_ep_obj.show_obj, naming_pattern=True) try: result = parser.parse(new_name) except (BaseException, Exception): - logger.log(u'Unable to parse %s, not valid' % new_name, logger.DEBUG) + logger.debug(f'Unable to parse {new_name}, not valid') return False - logger.log(u'The name %s parsed into %s' % (new_name, result), logger.DEBUG) + logger.debug(f'The name {new_name} parsed into {result}') if abd or sports: if result.air_date != sample_ep_obj.airdate: - logger.log(u'Air date incorrect in parsed episode, pattern isn\'t valid', logger.DEBUG) + logger.debug('Air date incorrect in parsed episode, pattern isn\'t valid') return False elif 3 == anime_type: if result.season_number != sample_ep_obj.season: - logger.log(u'Season number incorrect in parsed episode, pattern isn\'t valid', logger.DEBUG) + logger.debug('Season number incorrect in parsed episode, pattern isn\'t valid') return False if result.episode_numbers != [x.episode 
for x in [sample_ep_obj] + sample_ep_obj.related_ep_obj]: - logger.log(u'Episode numbering incorrect in parsed episode, pattern isn\'t valid', logger.DEBUG) + logger.debug('Episode numbering incorrect in parsed episode, pattern isn\'t valid') return False else: if len(result.ab_episode_numbers) \ and result.ab_episode_numbers != [x.absolute_number for x in [sample_ep_obj] + sample_ep_obj.related_ep_obj]: - logger.log(u'Absolute numbering incorrect in parsed episode, pattern isn\'t valid', logger.DEBUG) + logger.debug('Absolute numbering incorrect in parsed episode, pattern isn\'t valid') return False return True diff --git a/sickgear/network_timezones.py b/sickgear/network_timezones.py index c6c548ce..abedde55 100644 --- a/sickgear/network_timezones.py +++ b/sickgear/network_timezones.py @@ -156,9 +156,9 @@ def _remove_old_zoneinfo(): for _dir in (sickgear.ZONEINFO_DIR, )]): # type: DirEntry if current_file != entry.path: if remove_file_perm(entry.path, log_err=False): - logger.log(u'Delete unneeded old zoneinfo File: %s' % entry.path) + logger.log(f'Delete unneeded old zoneinfo File: {entry.path}') else: - logger.log(u'Unable to delete: %s' % entry.path, logger.ERROR) + logger.error(f'Unable to delete: {entry.path}') def _update_zoneinfo(): @@ -175,16 +175,15 @@ def _update_zoneinfo(): if None is url_data: update_last_retry() # when None is urlData, trouble connecting to GitHub - logger.log(u'Fetching zoneinfo.txt failed, this can happen from time to time. Unable to get URL: %s' % url, - logger.WARNING) + logger.warning(f'Fetching zoneinfo.txt failed, this can happen from time to time. Unable to get URL: {url}') return reset_last_retry() try: - (new_zoneinfo, zoneinfo_md5) = url_data.strip().rsplit(u' ') + (new_zoneinfo, zoneinfo_md5) = url_data.strip().rsplit(' ') except (BaseException, Exception): - logger.log('Fetching zoneinfo.txt failed, update contains unparsable data: %s' % url_data, logger.DEBUG) + logger.debug('Fetching zoneinfo.txt failed, update contains unparsable data: %s' % url_data) return current_file = zoneinfo.ZONEFILENAME @@ -206,25 +205,25 @@ def _update_zoneinfo(): return # load the new zoneinfo - url_tar = u'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/%s' % new_zoneinfo + url_tar = f'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/{new_zoneinfo}' zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile) if not remove_file_perm(zonefile_tmp, log_err=False): - logger.log(u'Unable to delete: %s' % zonefile_tmp, logger.ERROR) + logger.error(f'Unable to delete: {zonefile_tmp}') return if not helpers.download_file(url_tar, zonefile_tmp): return if not os.path.exists(zonefile_tmp): - logger.log(u'Download of %s failed.' 
% zonefile_tmp, logger.ERROR) + logger.error(f'Download of {zonefile_tmp} failed.') return new_hash = str(helpers.md5_for_file(zonefile_tmp)) if zoneinfo_md5.upper() == new_hash.upper(): - logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo, logger.MESSAGE) + logger.log(f'Updating timezone info with new one: {new_zoneinfo}', logger.MESSAGE) try: # remove the old zoneinfo file if None is not current_file: @@ -245,7 +244,7 @@ def _update_zoneinfo(): return else: remove_file_perm(zonefile_tmp, log_err=False) - logger.log(u'MD5 hash does not match: %s File: %s' % (zoneinfo_md5.upper(), new_hash.upper()), logger.ERROR) + logger.error(f'MD5 hash does not match: {zoneinfo_md5.upper()} File: {new_hash.upper()}') return @@ -270,7 +269,7 @@ def update_network_dict(): if url_data in (None, ''): update_last_retry() # When None is urlData, trouble connecting to GitHub - logger.debug(u'Updating network timezones failed, this can happen from time to time. URL: %s' % url) + logger.debug(f'Updating network timezones failed, this can happen from time to time. URL: {url}') load_network_dict(load=False) return @@ -279,7 +278,7 @@ def update_network_dict(): try: for line in url_data.splitlines(): try: - (name, tzone) = line.strip().rsplit(u':', 1) + (name, tzone) = line.strip().rsplit(':', 1) except (BaseException, Exception): continue if None is name or None is tzone: @@ -512,14 +511,14 @@ def _load_network_conversions(): if url_data in (None, ''): update_last_retry() # when no url_data, trouble connecting to GitHub - logger.debug(u'Updating network conversions failed, this can happen from time to time. URL: %s' % url) + logger.debug(f'Updating network conversions failed, this can happen from time to time. URL: {url}') return reset_last_retry() try: for line in url_data.splitlines(): - (tvdb_network, tvrage_network, tvrage_country) = line.strip().rsplit(u'::', 2) + (tvdb_network, tvrage_network, tvrage_country) = line.strip().rsplit('::', 2) if not (tvdb_network and tvrage_network and tvrage_country): continue conversions_in.append( diff --git a/sickgear/notifiers/boxcar2.py b/sickgear/notifiers/boxcar2.py index c8d8f16f..b6d2ecd2 100644 --- a/sickgear/notifiers/boxcar2.py +++ b/sickgear/notifiers/boxcar2.py @@ -72,7 +72,7 @@ class Boxcar2Notifier(Notifier): except urllib.error.HTTPError as e: if not hasattr(e, 'code'): - self._log_error(u'Notification failed: %s' % ex(e)) + self._log_error(f'Notification failed: {ex(e)}') else: result = 'Notification failed. 
Error code: %s' % e.code self._log_error(result) @@ -91,7 +91,7 @@ class Boxcar2Notifier(Notifier): result = 'Wrong data sent to Boxcar' self._log_error(result) except urllib.error.URLError as e: - self._log_error(u'Notification failed: %s' % ex(e)) + self._log_error(f'Notification failed: {ex(e)}') return self._choose((True, 'Failed to send notification: %s' % result)[bool(result)], not bool(result)) diff --git a/sickgear/notifiers/emailnotify.py b/sickgear/notifiers/emailnotify.py index 68c51bbd..d4dab8f5 100644 --- a/sickgear/notifiers/emailnotify.py +++ b/sickgear/notifiers/emailnotify.py @@ -44,8 +44,8 @@ class EmailNotifier(Notifier): use_tls = 1 == sickgear.helpers.try_int(use_tls) login = any(user) and any(pwd) - self._log_debug(u'Sendmail HOST: %s; PORT: %s; LOGIN: %s, TLS: %s, USER: %s, FROM: %s, TO: %s' % ( - host, port, login, use_tls, user, smtp_from, to)) + self._log_debug(f'Sendmail HOST: {host}; PORT: {port};' + f' LOGIN: {login}, TLS: {use_tls}, USER: {user}, FROM: {smtp_from}, TO: {to}') try: srv = smtplib.SMTP(host, int(port)) @@ -54,16 +54,16 @@ class EmailNotifier(Notifier): if use_tls or login: srv.ehlo() - self._log_debug(u'Sent initial EHLO command') + self._log_debug('Sent initial EHLO command') if use_tls: srv.starttls() srv.ehlo() - self._log_debug(u'Sent STARTTLS and EHLO command') + self._log_debug('Sent STARTTLS and EHLO command') if login: srv.login(user, pwd) - self._log_debug(u'Sent LOGIN command') + self._log_debug('Sent LOGIN command') srv.sendmail(smtp_from, to, msg.as_string()) srv.quit() @@ -101,10 +101,10 @@ class EmailNotifier(Notifier): show_name = body.split(' - ')[0] to = self._get_recipients(show_name) if not any(to): - self._log_warning(u'No email recipients to notify, skipping') + self._log_warning('No email recipients to notify, skipping') return - self._log_debug(u'Email recipients to notify: %s' % to) + self._log_debug(f'Email recipients to notify: {to}') try: msg = MIMEMultipart('alternative') @@ -131,9 +131,9 @@ class EmailNotifier(Notifier): msg['Date'] = formatdate(localtime=True) if self._sendmail(sickgear.EMAIL_HOST, sickgear.EMAIL_PORT, sickgear.EMAIL_FROM, sickgear.EMAIL_TLS, sickgear.EMAIL_USER, sickgear.EMAIL_PASSWORD, to, msg): - self._log_debug(u'%s notification sent to [%s] for "%s"' % (title, to, body)) + self._log_debug(f'{title} notification sent to [{to}] for "{body}"') else: - self._log_error(u'%s notification ERROR: %s' % (title, self.last_err)) + self._log_error(f'{title} notification ERROR: {self.last_err}') def test_notify(self, host, port, smtp_from, use_tls, user, pwd, to): self._testing = True diff --git a/sickgear/notifiers/emby.py b/sickgear/notifiers/emby.py index 81065c00..91e3e27e 100644 --- a/sickgear/notifiers/emby.py +++ b/sickgear/notifiers/emby.py @@ -61,7 +61,7 @@ class EmbyNotifier(Notifier): """ hosts, keys, message = self._check_config() if not hosts: - self._log_warning(u'Issue with hosts or api keys, check your settings') + self._log_warning('Issue with hosts or api keys, check your settings') return False from sickgear.indexers import indexer_config @@ -98,10 +98,10 @@ class EmbyNotifier(Notifier): timeout=20, hooks=dict(response=self._cb_response), **args) # Emby will initiate a LibraryMonitor path refresh one minute after this success if self.response and 204 == self.response.get('status_code') and self.response.get('ok'): - self._log(u'Success: update %s sent to host %s in a library updated call' % (mode_to_log, cur_host)) + self._log(f'Success: update {mode_to_log} sent to host {cur_host} in a 
library updated call') continue elif self.response and 401 == self.response.get('status_code'): - self._log_warning(u'Failed to authenticate with %s' % cur_host) + self._log_warning(f'Failed to authenticate with {cur_host}') elif self.response and 404 == self.response.get('status_code'): self.response = None sickgear.helpers.get_url( @@ -109,16 +109,16 @@ class EmbyNotifier(Notifier): headers={'Content-type': 'application/json', 'X-MediaBrowser-Token': keys[i]}, timeout=20, hooks=dict(response=self._cb_response), post_json={'Path': '', 'UpdateType': ''}) if self.response and 204 == self.response.get('status_code') and self.response.get('ok'): - self._log(u'Success: fallback to sending Library/Media/Updated call' - u' to scan all shows at host %s' % cur_host) + self._log(f'Success: fallback to sending Library/Media/Updated call' + f' to scan all shows at host {cur_host}') continue - self._log_debug(u'Warning, Library update responded 404 not found and' - u' fallback to new /Library/Media/Updated api call failed at %s' % cur_host) + self._log_debug(f'Warning, Library update responded 404 not found and' + f' fallback to new /Library/Media/Updated api call failed at {cur_host}') elif not response and not self.response or not self.response.get('ok'): - self._log_warning(u'Warning, could not connect with server at %s' % cur_host) + self._log_warning(f'Warning, could not connect with server at {cur_host}') else: - self._log_debug(u'Warning, unknown response %sfrom %s, can most likely be ignored' - % (self.response and '%s ' % self.response.get('status_code') or '', cur_host)) + self._log_debug(f'Warning, unknown response %sfrom {cur_host}, can most likely be ignored' + % (self.response and '%s ' % self.response.get('status_code') or '')) total_success = False return total_success @@ -181,7 +181,7 @@ class EmbyNotifier(Notifier): if len(hosts) != len(apikeys): message = ('Not enough Api keys for hosts', 'More Api keys than hosts')[len(apikeys) > len(hosts)] - self._log_warning(u'%s, check your settings' % message) + self._log_warning(f'{message}, check your settings') return False, False, message return hosts, apikeys, 'OK' @@ -215,12 +215,12 @@ class EmbyNotifier(Notifier): if self.response and 401 == self.response.get('status_code'): success = False message += ['Fail: Cannot authenticate API key with %s' % cur_host] - self._log_warning(u'Failed to authenticate with %s' % cur_host) + self._log_warning(f'Failed to authenticate with {cur_host}') continue elif not response and not self.response or not self.response.get('ok'): success = False message += ['Fail: No supported Emby server found at %s' % cur_host] - self._log_warning(u'Warning, could not connect with server at ' + cur_host) + self._log_warning(f'Warning, could not connect with server at {cur_host}') continue message += ['OK: %s' % cur_host] diff --git a/sickgear/notifiers/generic.py b/sickgear/notifiers/generic.py index ce4d98f2..a9f14ff8 100644 --- a/sickgear/notifiers/generic.py +++ b/sickgear/notifiers/generic.py @@ -25,7 +25,7 @@ notify_strings = dict( git_updated='SickGear updated', git_updated_text='SickGear updated to commit#: ', test_title='SickGear notification test', - test_body=u'Success testing %s settings from SickGear ʕ•ᴥ•ʔ', + test_body='Success testing %s settings from SickGear ʕ•ᴥ•ʔ', ) @@ -40,7 +40,7 @@ class BaseNotifier(object): return 'https://raw.githubusercontent.com/SickGear/SickGear/main/gui/slick/images/ico/' + self.sg_logo_file def _log(self, msg, level=logger.MESSAGE): - logger.log(u'%s: %s' % (self.name, 
msg), level) + logger.log(f'{self.name}: {msg}', level) def _log_debug(self, msg): self._log(msg, logger.DEBUG) @@ -108,7 +108,7 @@ class BaseNotifier(object): @staticmethod def _body_only(title, body): # don't use title with updates or testing, as only one str is used - return body if 'SickGear' in title else u'%s: %s' % (title, body.replace('#: ', '# ')) + return body if 'SickGear' in title else f'{title}: {body.replace("#: ", "# ")}' class Notifier(BaseNotifier): @@ -136,7 +136,7 @@ class Notifier(BaseNotifier): self._pre_notify('git_updated', notify_strings['git_updated_text'] + new_version, **kwargs) def _pre_notify(self, notify_string, message, *args, **kwargs): - self._log_debug(u'Sending notification "%s"' % (self._body_only(notify_strings[notify_string], message))) + self._log_debug(f'Sending notification "{self._body_only(notify_strings[notify_string], message)}"') try: return self._notify(notify_strings[notify_string], message, *args, **kwargs) except (BaseException, Exception): diff --git a/sickgear/notifiers/growl.py b/sickgear/notifiers/growl.py index 4eaa872e..ffc51c9a 100644 --- a/sickgear/notifiers/growl.py +++ b/sickgear/notifiers/growl.py @@ -94,7 +94,7 @@ class GrowlNotifier(Notifier): success = True except (BaseException, Exception) as e: - self._log_warning(u'Unable to send growl to %s:%s - %s' % (opts['host'], opts['port'], ex(e))) + self._log_warning(f'Unable to send growl to {opts["host"]}:{opts["port"]} - {ex(e)}') return success diff --git a/sickgear/notifiers/kodi.py b/sickgear/notifiers/kodi.py index 7b652f17..5fb783e8 100644 --- a/sickgear/notifiers/kodi.py +++ b/sickgear/notifiers/kodi.py @@ -94,7 +94,7 @@ class KodiNotifier(Notifier): Returns: True if processing succeeded with no issues else False if any issues found """ if not sickgear.KODI_HOST: - self._log_warning(u'No Kodi hosts specified, check your settings') + self._log_warning('No Kodi hosts specified, check your settings') return False # either update each host, or only attempt to update until one successful result @@ -108,7 +108,7 @@ class KodiNotifier(Notifier): response = self._send_json(cur_host, dict(method='Profiles.GetCurrentProfile')) if self.response and 401 == self.response.get('status_code'): - self._log_debug(u'Failed to authenticate with %s' % cur_host) + self._log_debug(f'Failed to authenticate with {cur_host}') continue if not response: self._maybe_log_failed_detection(cur_host) @@ -117,7 +117,7 @@ class KodiNotifier(Notifier): if self._send_library_update(cur_host, show_name): only_first.update(dict(profile=response.get('label') or 'Master', host=cur_host)) self._log('Success: profile;' + - u'"%(profile)s" at%(first)s host;%(host)s updated%(show)s%(first_note)s' % only_first) + '"%(profile)s" at%(first)s host;%(host)s updated%(show)s%(first_note)s' % only_first) else: self._maybe_log_failed_detection(cur_host) result += 1 @@ -148,10 +148,10 @@ class KodiNotifier(Notifier): failed_msg = 'Single show update failed,' if sickgear.KODI_UPDATE_FULL: - self._log_debug(u'%s falling back to full update' % failed_msg) + self._log_debug(f'{failed_msg} falling back to full update') return __method_update(host) - self._log_debug(u'%s consider enabling "Perform full library update" in config/notifications' % failed_msg) + self._log_debug(f'{failed_msg} consider enabling "Perform full library update" in config/notifications') return False ############################################################################## @@ -169,7 +169,7 @@ class KodiNotifier(Notifier): """ if not host: - 
self._log_warning(u'No host specified, aborting update') + self._log_warning('No host specified, aborting update') return False args = {} @@ -198,14 +198,14 @@ class KodiNotifier(Notifier): """ if not host: - self._log_warning(u'No host specified, aborting update') + self._log_warning('No host specified, aborting update') return False - self._log_debug(u'Updating library via HTTP method for host: %s' % host) + self._log_debug(f'Updating library via HTTP method for host: {host}') # if we're doing per-show if show_name: - self._log_debug(u'Updating library via HTTP method for show %s' % show_name) + self._log_debug(f'Updating library via HTTP method for show {show_name}') # noinspection SqlResolve path_sql = 'SELECT path.strPath' \ @@ -223,29 +223,28 @@ class KodiNotifier(Notifier): # sql used to grab path(s) response = self._send(host, {'command': 'QueryVideoDatabase(%s)' % path_sql}) if not response: - self._log_debug(u'Invalid response for %s on %s' % (show_name, host)) + self._log_debug(f'Invalid response for {show_name} on {host}') return False try: et = etree.fromstring(quote(response, ':\\/<>')) except SyntaxError as e: - self._log_error(u'Unable to parse XML in response: %s' % ex(e)) + self._log_error(f'Unable to parse XML in response: {ex(e)}') return False paths = et.findall('.//field') if not paths: - self._log_debug(u'No valid path found for %s on %s' % (show_name, host)) + self._log_debug(f'No valid path found for {show_name} on {host}') return False for path in paths: # we do not need it double-encoded, gawd this is dumb un_enc_path = decode_str(unquote(path.text), sickgear.SYS_ENCODING) - self._log_debug(u'Updating %s on %s at %s' % (show_name, host, un_enc_path)) + self._log_debug(f'Updating {show_name} on {host} at {un_enc_path}') if not self._send( host, dict(command='ExecBuiltIn', parameter='Kodi.updatelibrary(video, %s)' % un_enc_path)): - self._log_error(u'Update of show directory failed for %s on %s at %s' - % (show_name, host, un_enc_path)) + self._log_error(f'Update of show directory failed for {show_name} on {host} at {un_enc_path}') return False # sleep for a few seconds just to be sure kodi has a chance to finish each directory @@ -253,10 +252,10 @@ class KodiNotifier(Notifier): time.sleep(5) # do a full update if requested else: - self._log_debug(u'Full library update on host: %s' % host) + self._log_debug(f'Full library update on host: {host}') if not self._send(host, dict(command='ExecBuiltIn', parameter='Kodi.updatelibrary(video)')): - self._log_error(u'Failed full library update on: %s' % host) + self._log_error(f'Failed full library update on: {host}') return False return True @@ -277,7 +276,7 @@ class KodiNotifier(Notifier): result = {} if not host: - self._log_warning(u'No host specified, aborting update') + self._log_warning('No host specified, aborting update') return result if isinstance(command, dict): @@ -300,8 +299,8 @@ class KodiNotifier(Notifier): if not response.get('error'): return 'OK' == response.get('result') and {'OK': True} or response.get('result') - self._log_error(u'API error; %s from %s in response to command: %s' - % (json_dumps(response['error']), host, json_dumps(command))) + self._log_error(f'API error; {json_dumps(response["error"])} from {host}' + f' in response to command: {json_dumps(command)}') return result def _update_json(self, host=None, show_name=None): @@ -317,12 +316,12 @@ class KodiNotifier(Notifier): """ if not host: - self._log_warning(u'No host specified, aborting update') + self._log_warning('No host specified, 
aborting update') return False # if we're doing per-show if show_name: - self._log_debug(u'JSON library update. Host: %s Show: %s' % (host, show_name)) + self._log_debug(f'JSON library update. Host: {host} Show: {show_name}') # try fetching tvshowid using show_name with a fallback to getting show list show_name = unquote_plus(show_name) @@ -339,7 +338,7 @@ class KodiNotifier(Notifier): break if not shows: - self._log_debug(u'No items in GetTVShows response') + self._log_debug('No items in GetTVShows response') return False tvshowid = -1 @@ -354,7 +353,7 @@ class KodiNotifier(Notifier): # we didn't find the show (exact match), thus revert to just doing a full update if enabled if -1 == tvshowid: - self._log_debug(u'Doesn\'t have "%s" in it\'s known shows, full library update required' % show_name) + self._log_debug(f'Doesn\'t have "{show_name}" in it\'s known shows, full library update required') return False # lookup tv-show path if we don't already know it @@ -365,24 +364,24 @@ class KodiNotifier(Notifier): path = 'tvshowdetails' in response and response['tvshowdetails'].get('file', '') or '' if not len(path): - self._log_warning(u'No valid path found for %s with ID: %s on %s' % (show_name, tvshowid, host)) + self._log_warning(f'No valid path found for {show_name} with ID: {tvshowid} on {host}') return False - self._log_debug(u'Updating %s on %s at %s' % (show_name, host, path)) + self._log_debug(f'Updating {show_name} on {host} at {path}') command = dict(method='VideoLibrary.Scan', params={'directory': '%s' % json_dumps(path)[1:-1].replace('\\\\', '\\')}) response_scan = self._send_json(host, command) if not response_scan.get('OK'): - self._log_error(u'Update of show directory failed for %s on %s at %s response: %s' % - (show_name, host, path, response_scan)) + self._log_error(f'Update of show directory failed for {show_name} on {host} at {path}' + f' response: {response_scan}') return False # do a full update if requested else: - self._log_debug(u'Full library update on host: %s' % host) + self._log_debug(f'Full library update on host: {host}') response_scan = self._send_json(host, dict(method='VideoLibrary.Scan')) if not response_scan.get('OK'): - self._log_error(u'Failed full library update on: %s response: %s' % (host, response_scan)) + self._log_error(f'Failed full library update on: {host} response: {response_scan}') return False return True @@ -400,7 +399,7 @@ class KodiNotifier(Notifier): def _maybe_log_failed_detection(self, host, msg='connect to'): - self._maybe_log(u'Failed to %s %s, check device(s) and config' % (msg, host), logger.ERROR) + self._maybe_log(f'Failed to {msg} {host}, check device(s) and config', logger.ERROR) def _notify(self, title, body, hosts=None, username=None, password=None, **kwargs): """ Internal wrapper for the notify_snatch and notify_download functions @@ -429,20 +428,20 @@ class KodiNotifier(Notifier): if self.response and 401 == self.response.get('status_code'): success = False message += ['Fail: Cannot authenticate with %s' % cur_host] - self._log_debug(u'Failed to authenticate with %s' % cur_host) + self._log_debug(f'Failed to authenticate with {cur_host}') elif not api_version: success = False message += ['Fail: No supported Kodi found at %s' % cur_host] self._maybe_log_failed_detection(cur_host, 'connect and detect version for') else: if 4 >= api_version: - self._log_debug(u'Detected %sversion <= 11, using HTTP API' - % self.prefix and ' ' + self.prefix.capitalize()) + self._log_debug(f'Detected {self.prefix and " " + 
self.prefix.capitalize()}version <= 11,' + f' using HTTP API') __method_send = self._send command = dict(command='ExecBuiltIn', parameter='Notification(%s,%s)' % (title, body)) else: - self._log_debug(u'Detected version >= 12, using JSON API') + self._log_debug('Detected version >= 12, using JSON API') __method_send = self._send_json command = dict(method='GUI.ShowNotification', params=dict( [('title', title), ('message', body), ('image', self._sg_logo_url)] diff --git a/sickgear/notifiers/libnotify.py b/sickgear/notifiers/libnotify.py index c4a82ffb..9dc47547 100644 --- a/sickgear/notifiers/libnotify.py +++ b/sickgear/notifiers/libnotify.py @@ -44,14 +44,14 @@ def diagnose(): try: bus = dbus.SessionBus() except dbus.DBusException as e: - return (u'Error: unable to connect to D-Bus session bus: %s. ' - u'Are you running SickGear in a desktop session?') % (cgi.escape(e),) + return (f'Error: unable to connect to D-Bus session bus: {cgi.escape(e)}.' + f' Are you running SickGear in a desktop session?') try: bus.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications') except dbus.DBusException as e: - return (u'Error: there doesn\'t seem to be a notification daemon available: %s ' - u'Try installing notification-daemon or notify-osd.') % (cgi.escape(e),) + return (f'Error: there doesn\'t seem to be a notification daemon available: {cgi.escape(e)}.' + f' Try installing notification-daemon or notify-osd.') return 'Error: Unable to send notification.' @@ -71,18 +71,18 @@ class LibnotifyNotifier(Notifier): # noinspection PyPackageRequirements import pynotify except ImportError: - self._log_error(u'Unable to import pynotify. libnotify notifications won\'t work') + self._log_error("Unable to import pynotify. libnotify notifications won't work") return False try: # noinspection PyPackageRequirements from gi.repository import GObject except ImportError: - self._log_error(u'Unable to import GObject from gi.repository. Cannot catch a GError in display') + self._log_error('Unable to import GObject from gi.repository. Cannot catch a GError in display') return False if not pynotify.init('SickGear'): - self._log_error(u'Initialization of pynotify failed. libnotify notifications won\'t work') + self._log_error('Initialization of pynotify failed. libnotify notifications won\'t work') return False self.pynotify = pynotify diff --git a/sickgear/notifiers/nmj.py b/sickgear/notifiers/nmj.py index 03be6551..fef80f01 100644 --- a/sickgear/notifiers/nmj.py +++ b/sickgear/notifiers/nmj.py @@ -43,11 +43,11 @@ class NMJNotifier(BaseNotifier): try: terminal = telnetlib.Telnet(host) except (BaseException, Exception): - self._log_warning(u'Unable to get a telnet session to %s' % host) + self._log_warning(f'Unable to get a telnet session to {host}') if result: # tell the terminal to output the necessary info to the screen so we can search it later - self._log_debug(u'Connected to %s via telnet' % host) + self._log_debug(f'Connected to {host} via telnet') terminal.read_until('sh-3.00# ') terminal.write('cat /tmp/source\n') terminal.write('cat /tmp/netshare\n') @@ -57,11 +57,11 @@ class NMJNotifier(BaseNotifier): match = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', tnoutput) # if we found the database in the terminal output then save that database to the config if not match: - self._log_warning(u'Could not get current NMJ database on %s, NMJ is probably not running!' 
% host) + self._log_warning(f'Could not get current NMJ database on {host}, NMJ is probably not running!') else: database = match.group(1) device = match.group(2) - self._log_debug(u'Found NMJ database %s on device %s' % (database, device)) + self._log_debug(f'Found NMJ database {database} on device {device}') sickgear.NMJ_DATABASE = database # if the device is a remote host then try to parse the mounting URL and save it to the config if device.startswith('NETWORK_SHARE/'): @@ -72,7 +72,7 @@ class NMJNotifier(BaseNotifier): 'but could not get the mounting url') else: mount = match.group().replace('127.0.0.1', host) - self._log_debug(u'Found mounting url on the Popcorn Hour in configuration: %s' % mount) + self._log_debug(f'Found mounting url on the Popcorn Hour in configuration: {mount}') sickgear.NMJ_MOUNT = mount result = True @@ -96,23 +96,23 @@ class NMJNotifier(BaseNotifier): database = self._choose(database, sickgear.NMJ_DATABASE) mount = self._choose(mount, sickgear.NMJ_MOUNT) - self._log_debug(u'Sending scan command for NMJ ') + self._log_debug('Sending scan command for NMJ') # if a mount URL is provided then attempt to open a handle to that URL if mount: try: req = urllib.request.Request(mount) - self._log_debug(u'Try to mount network drive via url: %s' % mount) + self._log_debug(f'Try to mount network drive via url: {mount}') http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager http_response_obj.close() except IOError as e: if hasattr(e, 'reason'): - self._log_warning(u'Could not contact Popcorn Hour on host %s: %s' % (host, e.reason)) + self._log_warning(f'Could not contact Popcorn Hour on host {host}: {e.reason}') elif hasattr(e, 'code'): - self._log_warning(u'Problem with Popcorn Hour on host %s: %s' % (host, e.code)) + self._log_warning(f'Problem with Popcorn Hour on host {host}: {e.code}') return False except (BaseException, Exception) as e: - self._log_error(u'Unknown exception: ' + ex(e)) + self._log_error(f'Unknown exception: {ex(e)}') return False # build up the request URL and parameters @@ -123,18 +123,18 @@ class NMJNotifier(BaseNotifier): # send the request to the server try: req = urllib.request.Request(update_url) - self._log_debug(u'Sending scan update command via url: %s' % update_url) + self._log_debug(f'Sending scan update command via url: {update_url}') http_response_obj = urllib.request.urlopen(req) response = http_response_obj.read() http_response_obj.close() except IOError as e: if hasattr(e, 'reason'): - self._log_warning(u'Could not contact Popcorn Hour on host %s: %s' % (host, e.reason)) + self._log_warning(f'Could not contact Popcorn Hour on host {host}: {e.reason}') elif hasattr(e, 'code'): - self._log_warning(u'Problem with Popcorn Hour on host %s: %s' % (host, e.code)) + self._log_warning(f'Problem with Popcorn Hour on host {host}: {e.code}') return False except (BaseException, Exception) as e: - self._log_error(u'Unknown exception: ' + ex(e)) + self._log_error(f'Unknown exception: {ex(e)}') return False # try to parse the resulting XML @@ -142,15 +142,15 @@ class NMJNotifier(BaseNotifier): et = etree.fromstring(response) result = et.findtext('returnValue') except SyntaxError as e: - self._log_error(u'Unable to parse XML returned from the Popcorn Hour: %s' % ex(e)) + self._log_error(f'Unable to parse XML returned from the Popcorn Hour: {ex(e)}') return False - # if the result was a number then consider that an error + # if the result was a number, then consider that an error if 0 < int(result): - 
self._log_error(u'Popcorn Hour returned an errorcode: %s' % result) + self._log_error(f'Popcorn Hour returned an errorcode: {result}') return False - self._log(u'NMJ started background scan') + self._log('NMJ started background scan') return True def _notify(self, host=None, database=None, mount=None, **kwargs): diff --git a/sickgear/notifiers/nmjv2.py b/sickgear/notifiers/nmjv2.py index 8303bae9..654d69a1 100644 --- a/sickgear/notifiers/nmjv2.py +++ b/sickgear/notifiers/nmjv2.py @@ -78,7 +78,7 @@ class NMJv2Notifier(BaseNotifier): result = True except IOError as e: - self._log_warning(u'Couldn\'t contact popcorn hour on host %s: %s' % (host, ex(e))) + self._log_warning(f'Couldn\'t contact popcorn hour on host {host}: {ex(e)}') if result: return '{"message": "Success, NMJ Database found at: %(host)s", "database": "%(database)s"}' % { @@ -100,7 +100,7 @@ class NMJv2Notifier(BaseNotifier): host = self._choose(host, sickgear.NMJv2_HOST) - self._log_debug(u'Sending scan command for NMJ ') + self._log_debug('Sending scan command for NMJ ') # if a host is provided then attempt to open a handle to that URL try: @@ -108,11 +108,11 @@ class NMJv2Notifier(BaseNotifier): url_scandir = '%s%s%s' % (base_url, 'metadata_database?', urlencode( dict(arg0='update_scandir', arg1=sickgear.NMJv2_DATABASE, arg2='', arg3='update_all'))) - self._log_debug(u'Scan update command sent to host: %s' % host) + self._log_debug(f'Scan update command sent to host: {host}') url_updatedb = '%s%s%s' % (base_url, 'metadata_database?', urlencode( dict(arg0='scanner_start', arg1=sickgear.NMJv2_DATABASE, arg2='background', arg3=''))) - self._log_debug(u'Try to mount network drive via url: %s' % host) + self._log_debug(f'Try to mount network drive via url: {host}') prereq = urllib.request.Request(url_scandir) req = urllib.request.Request(url_updatedb) @@ -127,24 +127,24 @@ class NMJv2Notifier(BaseNotifier): response2 = http_response_obj2.read() http_response_obj2.close() except IOError as e: - self._log_warning(u'Couldn\'t contact popcorn hour on host %s: %s' % (host, ex(e))) + self._log_warning(f'Couldn\'t contact popcorn hour on host {host}: {ex(e)}') return False try: et = etree.fromstring(response1) result1 = et.findtext('returnValue') except SyntaxError as e: - self._log_error(u'Unable to parse XML returned from the Popcorn Hour: update_scandir, %s' % ex(e)) + self._log_error(f'Unable to parse XML returned from the Popcorn Hour: update_scandir, {ex(e)}') return False try: et = etree.fromstring(response2) result2 = et.findtext('returnValue') except SyntaxError as e: - self._log_error(u'Unable to parse XML returned from the Popcorn Hour: scanner_start, %s' % ex(e)) + self._log_error(f'Unable to parse XML returned from the Popcorn Hour: scanner_start, {ex(e)}') return False - # if the result was a number then consider that an error + # if the result was a number, then consider that an error error_codes = ['8', '11', '22', '49', '50', '51', '60'] error_messages = ['Invalid parameter(s)/argument(s)', 'Invalid database path', @@ -155,15 +155,15 @@ class NMJv2Notifier(BaseNotifier): 'Read only file system'] if 0 < int(result1): index = error_codes.index(result1) - self._log_error(u'Popcorn Hour returned an error: %s' % (error_messages[index])) + self._log_error(f'Popcorn Hour returned an error: {error_messages[index]}') return False elif 0 < int(result2): index = error_codes.index(result2) - self._log_error(u'Popcorn Hour returned an error: %s' % (error_messages[index])) + self._log_error(f'Popcorn Hour returned an error: 
{error_messages[index]}') return False - self._log(u'NMJv2 started background scan') + self._log('NMJv2 started background scan') return True def _notify(self, host=None, **kwargs): diff --git a/sickgear/notifiers/plex.py b/sickgear/notifiers/plex.py index b84c7d89..38e3417a 100644 --- a/sickgear/notifiers/plex.py +++ b/sickgear/notifiers/plex.py @@ -45,33 +45,33 @@ class PLEXNotifier(Notifier): """ if not host: - self._log_error(u'No host specified, check your settings') + self._log_error('No host specified, check your settings') return False for key in command: command[key] = command[key].encode('utf-8') enc_command = urlencode(command) - self._log_debug(u'Encoded API command: ' + enc_command) + self._log_debug(f'Encoded API command: {enc_command}') url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command) try: req = urllib.request.Request(url) if password: req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) - self._log_debug(u'Contacting (with auth header) via url: ' + url) + self._log_debug(f'Contacting (with auth header) via url: {url}') else: - self._log_debug(u'Contacting via url: ' + url) + self._log_debug(f'Contacting via url: {url}') http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager result = decode_str(http_response_obj.read(), sickgear.SYS_ENCODING) http_response_obj.close() - self._log_debug(u'HTTP response: ' + result.replace('\n', '')) + self._log_debug('HTTP response: ' + result.replace('\n', '')) return True except (urllib.error.URLError, IOError) as e: - self._log_warning(u'Couldn\'t contact Plex at ' + url + ' ' + ex(e)) + self._log_warning(f'Couldn\'t contact Plex at {url} {ex(e)}') return False @staticmethod @@ -113,7 +113,7 @@ class PLEXNotifier(Notifier): results = [] for cur_host in [x.strip() for x in host.split(',')]: cur_host = unquote_plus(cur_host) - self._log(u'Sending notification to \'%s\'' % cur_host) + self._log(f'Sending notification to \'{cur_host}\'') result = self._send_to_plex(command, cur_host, username, password) results += [self._choose(('%s Plex client ... %s' % (('Successful test notice sent to', 'Failed test for')[not result], cur_host)), result)] @@ -148,7 +148,7 @@ class PLEXNotifier(Notifier): """ host = self._choose(host, sickgear.PLEX_SERVER_HOST) if not host: - msg = u'No Plex Media Server host specified, check your settings' + msg = 'No Plex Media Server host specified, check your settings' self._log_debug(msg) return '%sFail: %s' % (('', '
')[self._testing], msg) @@ -159,7 +159,7 @@ class PLEXNotifier(Notifier): token_arg = None if username and password: - self._log_debug(u'Fetching plex.tv credentials for user: ' + username) + self._log_debug('Fetching plex.tv credentials for user: ' + username) req = urllib.request.Request('https://plex.tv/users/sign_in.xml', data=b'') req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) req.add_header('X-Plex-Device-Name', 'SickGear') @@ -176,10 +176,10 @@ class PLEXNotifier(Notifier): token_arg = '?X-Plex-Token=' + token except urllib.error.URLError as e: - self._log(u'Error fetching credentials from plex.tv for user %s: %s' % (username, ex(e))) + self._log(f'Error fetching credentials from plex.tv for user {username}: {ex(e)}') except (ValueError, IndexError) as e: - self._log(u'Error parsing plex.tv response: ' + ex(e)) + self._log('Error parsing plex.tv response: ' + ex(e)) file_location = location if None is not location else '' if None is ep_obj else ep_obj.location host_validate = self._get_host_list(host, all([token_arg])) @@ -198,7 +198,7 @@ class PLEXNotifier(Notifier): sections = response.findall('.//Directory') if not sections: - self._log(u'Plex Media Server not running on: ' + cur_host) + self._log('Plex Media Server not running on: ' + cur_host) hosts_failed.append(cur_host) continue @@ -232,17 +232,17 @@ class PLEXNotifier(Notifier): host_list.append(cur_host) else: hosts_failed.append(cur_host) - self._log_error(u'Error updating library section for Plex Media Server: %s' % cur_host) + self._log_error(f'Error updating library section for Plex Media Server: {cur_host}') if len(hosts_failed) == len(host_validate): - self._log(u'No successful Plex host updated') + self._log('No successful Plex host updated') return 'Fail no successful Plex host updated: %s' % ', '.join([host for host in hosts_failed]) else: hosts = ', '.join(set(host_list)) if len(hosts_match): - self._log(u'Hosts updating where TV section paths match the downloaded show: %s' % hosts) + self._log(f'Hosts updating where TV section paths match the downloaded show: {hosts}') else: - self._log(u'Updating all hosts with TV sections: %s' % hosts) + self._log(f'Updating all hosts with TV sections: {hosts}') return '' hosts = [ diff --git a/sickgear/notifiers/prowl.py b/sickgear/notifiers/prowl.py index 876d66c0..c7b6a92b 100644 --- a/sickgear/notifiers/prowl.py +++ b/sickgear/notifiers/prowl.py @@ -52,7 +52,7 @@ class ProwlNotifier(Notifier): if 200 != response.status: if 401 == response.status: - result = u'Authentication, %s (bad API key?)' % response.reason + result = f'Authentication, {response.reason} (bad API key?)' else: result = 'Http response code "%s"' % response.status diff --git a/sickgear/notifiers/pushalot.py b/sickgear/notifiers/pushalot.py index 50772f09..3b28526e 100644 --- a/sickgear/notifiers/pushalot.py +++ b/sickgear/notifiers/pushalot.py @@ -30,7 +30,7 @@ class PushalotNotifier(Notifier): pushalot_auth_token = self._choose(pushalot_auth_token, sickgear.PUSHALOT_AUTHORIZATIONTOKEN) - self._log_debug(u'Title: %s, Message: %s, API: %s' % (title, body, pushalot_auth_token)) + self._log_debug(f'Title: {title}, Message: {body}, API: {pushalot_auth_token}') http_handler = moves.http_client.HTTPSConnection('pushalot.com') @@ -49,7 +49,7 @@ class PushalotNotifier(Notifier): if 200 != response.status: if 410 == response.status: - result = u'Authentication, %s (bad API key?)' % response.reason + result = f'Authentication, {response.reason} (bad API key?)' else: 
result = 'Http response code "%s"' % response.status diff --git a/sickgear/notifiers/pushbullet.py b/sickgear/notifiers/pushbullet.py index 1057bc68..bdda0bc5 100644 --- a/sickgear/notifiers/pushbullet.py +++ b/sickgear/notifiers/pushbullet.py @@ -69,7 +69,7 @@ class PushbulletNotifier(Notifier): result = resp.json()['error']['message'] except (BaseException, Exception): result = 'no response' - self._log_warning(u'%s' % result) + self._log_warning(f'{result}') return self._choose((True, 'Failed to send notification: %s' % result)[bool(result)], not bool(result)) diff --git a/sickgear/notifiers/pytivo.py b/sickgear/notifiers/pytivo.py index b05dddaa..a380a347 100644 --- a/sickgear/notifiers/pytivo.py +++ b/sickgear/notifiers/pytivo.py @@ -66,7 +66,7 @@ class PyTivoNotifier(BaseNotifier): request_url = 'http://%s/TiVoConnect?%s' % (host, urlencode( dict(Command='Push', Container=container, File=file_path, tsn=tsn))) - self._log_debug(u'Requesting ' + request_url) + self._log_debug(f'Requesting {request_url}') request = urllib.request.Request(request_url) @@ -76,17 +76,17 @@ class PyTivoNotifier(BaseNotifier): except urllib.error.HTTPError as e: if hasattr(e, 'reason'): - self._log_error(u'Error, failed to reach a server - ' + e.reason) + self._log_error('Error, failed to reach a server - ' + e.reason) return False elif hasattr(e, 'code'): - self._log_error(u'Error, the server couldn\'t fulfill the request - ' + e.code) + self._log_error('Error, the server couldn\'t fulfill the request - ' + e.code) return False except (BaseException, Exception) as e: - self._log_error(u'Unknown exception: ' + ex(e)) + self._log_error(f'Unknown exception: {ex(e)}') return False - self._log(u'Successfully requested transfer of file') + self._log('Successfully requested transfer of file') return True diff --git a/sickgear/notifiers/synoindex.py b/sickgear/notifiers/synoindex.py index 6e4bd5a9..32f6e089 100644 --- a/sickgear/notifiers/synoindex.py +++ b/sickgear/notifiers/synoindex.py @@ -32,11 +32,11 @@ class SynoIndexNotifier(BaseNotifier): self._move_object(old_file, new_file) def _cmdline_run(self, synoindex_cmd): - self._log_debug(u'Executing command ' + str(synoindex_cmd)) - self._log_debug(u'Absolute path to command: ' + os.path.abspath(synoindex_cmd[0])) + self._log_debug(f'Executing command {str(synoindex_cmd)}') + self._log_debug(f'Absolute path to command: {os.path.abspath(synoindex_cmd[0])}') try: output, err, exit_status = cmdline_runner(synoindex_cmd) - self._log_debug(u'Script result: %s' % output) + self._log_debug(f'Script result: {output}') except (BaseException, Exception) as e: self._log_error('Unable to run synoindex: %s' % ex(e)) diff --git a/sickgear/notifiers/synologynotifier.py b/sickgear/notifiers/synologynotifier.py index 1aacb2f8..51242a04 100644 --- a/sickgear/notifiers/synologynotifier.py +++ b/sickgear/notifiers/synologynotifier.py @@ -27,11 +27,11 @@ class SynologyNotifier(Notifier): def _notify(self, title, body, **kwargs): synodsmnotify_cmd = ['/usr/syno/bin/synodsmnotify', '@administrators', title, body] - self._log(u'Executing command ' + str(synodsmnotify_cmd)) - self._log_debug(u'Absolute path to command: ' + os.path.abspath(synodsmnotify_cmd[0])) + self._log(f'Executing command {synodsmnotify_cmd}') + self._log_debug(f'Absolute path to command: {os.path.abspath(synodsmnotify_cmd[0])}') try: output, err, exit_status = cmdline_runner(synodsmnotify_cmd) - self._log_debug(u'Script result: %s' % output) + self._log_debug(f'Script result: {output}') except (BaseException, 
Exception) as e: self._log('Unable to run synodsmnotify: %s' % ex(e)) diff --git a/sickgear/notifiers/telegram.py b/sickgear/notifiers/telegram.py index f528864e..96d86319 100644 --- a/sickgear/notifiers/telegram.py +++ b/sickgear/notifiers/telegram.py @@ -40,8 +40,8 @@ class TelegramNotifier(Notifier): access_token = self._choose(access_token, sickgear.TELEGRAM_ACCESS_TOKEN) cid = self._choose(chatid, sickgear.TELEGRAM_CHATID) try: - msg = self._body_only(('' if not title else u'%s' % title), body) - msg = msg.replace(u'%s: ' % title, u'%s:\r\n' % ('SickGear ' + title, title)[use_icon]) + msg = self._body_only(('' if not title else f'{title}'), body) + msg = msg.replace(f'{title}: ', f'{("SickGear " + title, title)[use_icon]}:\r\n') # HTML spaces ( ) and tabs ( ) aren't supported # See https://core.telegram.org/bots/api#html-style msg = re.sub('(?i) ?', ' ', msg) diff --git a/sickgear/notifiers/xbmc.py b/sickgear/notifiers/xbmc.py index 67b0412e..8dcae9ce 100644 --- a/sickgear/notifiers/xbmc.py +++ b/sickgear/notifiers/xbmc.py @@ -102,26 +102,26 @@ class XBMCNotifier(Notifier): """ - self._log(u'Sending request to update library for host: "%s"' % host) + self._log(f'Sending request to update library for host: "{host}"') xbmcapi = self._get_xbmc_version(host, sickgear.XBMC_USERNAME, sickgear.XBMC_PASSWORD) if xbmcapi: if 4 >= xbmcapi: # try to update for just the show, if it fails, do full update if enabled if not self._update_library_http(host, show_name) and sickgear.XBMC_UPDATE_FULL: - self._log_warning(u'Single show update failed, falling back to full update') + self._log_warning('Single show update failed, falling back to full update') return self._update_library_http(host) else: return True else: # try to update for just the show, if it fails, do full update if enabled if not self._update_library_json(host, show_name) and sickgear.XBMC_UPDATE_FULL: - self._log_warning(u'Single show update failed, falling back to full update') + self._log_warning('Single show update failed, falling back to full update') return self._update_library_json(host) else: return True - self._log_debug(u'Failed to detect version for "%s", check configuration and try again' % host) + self._log_debug(f'Failed to detect version for "{host}", check configuration and try again') return False # ############################################################################# @@ -142,7 +142,7 @@ class XBMCNotifier(Notifier): """ if not host: - self._log_debug(u'No host passed, aborting update') + self._log_debug('No host passed, aborting update') return False username = self._choose(username, sickgear.XBMC_USERNAME) @@ -152,7 +152,7 @@ class XBMCNotifier(Notifier): command[key] = command[key].encode('utf-8') enc_command = urlencode(command) - self._log_debug(u'Encoded API command: ' + enc_command) + self._log_debug('Encoded API command: ' + enc_command) url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command) try: @@ -160,19 +160,19 @@ class XBMCNotifier(Notifier): # if we have a password, use authentication if password: req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) - self._log_debug(u'Contacting (with auth header) via url: ' + url) + self._log_debug(f'Contacting (with auth header) via url: {url}') else: - self._log_debug(u'Contacting via url: ' + url) + self._log_debug(f'Contacting via url: {url}') http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager result = decode_str(http_response_obj.read(), sickgear.SYS_ENCODING) 
http_response_obj.close() - self._log_debug(u'HTTP response: ' + result.replace('\n', '')) + self._log_debug('HTTP response: ' + result.replace('\n', '')) return result except (urllib.error.URLError, IOError) as e: - self._log_warning(u'Couldn\'t contact HTTP at %s %s' % (url, ex(e))) + self._log_warning(f'Couldn\'t contact HTTP at {url} {ex(e)}') return False def _update_library_http(self, host=None, show_name=None): @@ -191,14 +191,14 @@ class XBMCNotifier(Notifier): """ if not host: - self._log_debug(u'No host passed, aborting update') + self._log_debug('No host passed, aborting update') return False - self._log_debug(u'Updating XMBC library via HTTP method for host: ' + host) + self._log_debug('Updating XMBC library via HTTP method for host: ' + host) # if we're doing per-show if show_name: - self._log_debug(u'Updating library via HTTP method for show ' + show_name) + self._log_debug('Updating library via HTTP method for show ' + show_name) # noinspection SqlResolve path_sql = 'select path.strPath' \ @@ -224,30 +224,30 @@ class XBMCNotifier(Notifier): self._send_to_xbmc(reset_command, host) if not sql_xml: - self._log_debug(u'Invalid response for ' + show_name + ' on ' + host) + self._log_debug('Invalid response for ' + show_name + ' on ' + host) return False enc_sql_xml = quote(sql_xml, ':\\/<>') try: et = etree.fromstring(enc_sql_xml) except SyntaxError as e: - self._log_error(u'Unable to parse XML response: ' + ex(e)) + self._log_error(f'Unable to parse XML response: {ex(e)}') return False paths = et.findall('.//field') if not paths: - self._log_debug(u'No valid paths found for ' + show_name + ' on ' + host) + self._log_debug('No valid paths found for ' + show_name + ' on ' + host) return False for path in paths: # we do not need it double-encoded, gawd this is dumb un_enc_path = decode_str(unquote(path.text), sickgear.SYS_ENCODING) - self._log_debug(u'Updating ' + show_name + ' on ' + host + ' at ' + un_enc_path) + self._log_debug('Updating ' + show_name + ' on ' + host + ' at ' + un_enc_path) update_command = dict(command='ExecBuiltIn', parameter='XBMC.updatelibrary(video, %s)' % un_enc_path) request = self._send_to_xbmc(update_command, host) if not request: - self._log_error(u'Update of show directory failed on ' + show_name + self._log_error('Update of show directory failed on ' + show_name + ' on ' + host + ' at ' + un_enc_path) return False # sleep for a few seconds just to be sure xbmc has a chance to finish each directory @@ -255,12 +255,12 @@ class XBMCNotifier(Notifier): time.sleep(5) # do a full update if requested else: - self._log(u'Doing full library update on host: ' + host) + self._log('Doing full library update on host: ' + host) update_command = {'command': 'ExecBuiltIn', 'parameter': 'XBMC.updatelibrary(video)'} request = self._send_to_xbmc(update_command, host) if not request: - self._log_error(u'Full Library update failed on: ' + host) + self._log_error('Full Library update failed on: ' + host) return False return True @@ -284,14 +284,14 @@ class XBMCNotifier(Notifier): """ if not host: - self._log_debug(u'No host passed, aborting update') + self._log_debug('No host passed, aborting update') return False username = self._choose(username, sickgear.XBMC_USERNAME) password = self._choose(password, sickgear.XBMC_PASSWORD) command = command.encode('utf-8') - self._log_debug(u'JSON command: ' + command) + self._log_debug('JSON command: ' + command) url = 'http://%s/jsonrpc' % host try: @@ -300,28 +300,28 @@ class XBMCNotifier(Notifier): # if we have a password, use 
authentication if password: req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) - self._log_debug(u'Contacting (with auth header) via url: ' + url) + self._log_debug(f'Contacting (with auth header) via url: {url}') else: - self._log_debug(u'Contacting via url: ' + url) + self._log_debug(f'Contacting via url: {url}') try: http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager except urllib.error.URLError as e: - self._log_warning(u'Error while trying to retrieve API version for "%s": %s' % (host, ex(e))) + self._log_warning(f'Error while trying to retrieve API version for "{host}": {ex(e)}') return False # parse the json result try: result = json_load(http_response_obj) http_response_obj.close() - self._log_debug(u'JSON response: ' + str(result)) + self._log_debug(f'JSON response: {result}') return result # need to return response for parsing except ValueError: - self._log_warning(u'Unable to decode JSON: ' + http_response_obj) + self._log_warning('Unable to decode JSON: ' + http_response_obj) return False except IOError as e: - self._log_warning(u'Couldn\'t contact JSON API at ' + url + ' ' + ex(e)) + self._log_warning(f'Couldn\'t contact JSON API at {url} {ex(e)}') return False def _update_library_json(self, host=None, show_name=None): @@ -340,15 +340,15 @@ class XBMCNotifier(Notifier): """ if not host: - self._log_debug(u'No host passed, aborting update') + self._log_debug('No host passed, aborting update') return False - self._log(u'Updating XMBC library via JSON method for host: ' + host) + self._log('Updating XMBC library via JSON method for host: ' + host) # if we're doing per-show if show_name: tvshowid = -1 - self._log_debug(u'Updating library via JSON method for show ' + show_name) + self._log_debug('Updating library via JSON method for show ' + show_name) # get tvshowid by showName shows_command = '{"jsonrpc":"2.0","method":"VideoLibrary.GetTVShows","id":1}' @@ -357,7 +357,7 @@ class XBMCNotifier(Notifier): if shows_response and 'result' in shows_response and 'tvshows' in shows_response['result']: shows = shows_response['result']['tvshows'] else: - self._log_debug(u'No tvshows in TV show list') + self._log_debug('No tvshows in TV show list') return False for show in shows: @@ -370,7 +370,7 @@ class XBMCNotifier(Notifier): # we didn't find the show (exact match), thus revert to just doing a full update if enabled if -1 == tvshowid: - self._log_debug(u'Exact show name not matched in TV show list') + self._log_debug('Exact show name not matched in TV show list') return False # lookup tv-show path @@ -379,19 +379,19 @@ class XBMCNotifier(Notifier): path_response = self._send_to_xbmc_json(path_command, host) path = path_response['result']['tvshowdetails']['file'] - self._log_debug(u'Received Show: ' + show_name + ' with ID: ' + str(tvshowid) + ' Path: ' + path) + self._log_debug('Received Show: ' + show_name + ' with ID: ' + str(tvshowid) + ' Path: ' + path) if 1 > len(path): - self._log_warning(u'No valid path found for ' + show_name + ' with ID: ' + self._log_warning('No valid path found for ' + show_name + ' with ID: ' + str(tvshowid) + ' on ' + host) return False - self._log_debug(u'Updating ' + show_name + ' on ' + host + ' at ' + path) + self._log_debug('Updating ' + show_name + ' on ' + host + ' at ' + path) update_command = '{"jsonrpc":"2.0","method":"VideoLibrary.Scan","params":{"directory":%s},"id":1}' % ( json_dumps(path)) request = self._send_to_xbmc_json(update_command, host) if not 
request: - self._log_error(u'Update of show directory failed on ' + show_name + ' on ' + host + ' at ' + path) + self._log_error('Update of show directory failed on ' + show_name + ' on ' + host + ' at ' + path) return False # catch if there was an error in the returned request @@ -399,18 +399,18 @@ class XBMCNotifier(Notifier): for r in request: if 'error' in r: self._log_error( - u'Error while attempting to update show directory for ' + show_name + 'Error while attempting to update show directory for ' + show_name + ' on ' + host + ' at ' + path) return False # do a full update if requested else: - self._log(u'Doing Full Library update on host: ' + host) + self._log('Doing Full Library update on host: ' + host) update_command = '{"jsonrpc":"2.0","method":"VideoLibrary.Scan","id":1}' request = self._send_to_xbmc_json(update_command, host, sickgear.XBMC_USERNAME, sickgear.XBMC_PASSWORD) if not request: - self._log_error(u'Full Library update failed on: ' + host) + self._log_error('Full Library update failed on: ' + host) return False return True @@ -441,12 +441,12 @@ class XBMCNotifier(Notifier): for cur_host in [x.strip() for x in hosts.split(',')]: cur_host = unquote_plus(cur_host) - self._log(u'Sending notification to "%s"' % cur_host) + self._log(f'Sending notification to "{cur_host}"') xbmcapi = self._get_xbmc_version(cur_host, username, password) if xbmcapi: if 4 >= xbmcapi: - self._log_debug(u'Detected version <= 11, using HTTP API') + self._log_debug('Detected version <= 11, using HTTP API') command = dict(command='ExecBuiltIn', parameter='Notification(' + title.encode('utf-8') + ',' + body.encode('utf-8') + ')') notify_result = self._send_to_xbmc(command, cur_host, username, password) @@ -454,7 +454,7 @@ class XBMCNotifier(Notifier): result += [cur_host + ':' + str(notify_result)] success |= 'OK' in notify_result or success else: - self._log_debug(u'Detected version >= 12, using JSON API') + self._log_debug('Detected version >= 12, using JSON API') command = '{"jsonrpc":"2.0","method":"GUI.ShowNotification",' \ '"params":{"title":"%s","message":"%s", "image": "%s"},"id":1}' % \ (title.encode('utf-8'), body.encode('utf-8'), self._sg_logo_url) @@ -464,7 +464,7 @@ class XBMCNotifier(Notifier): success |= 'OK' in notify_result or success else: if sickgear.XBMC_ALWAYS_ON or self._testing: - self._log_error(u'Failed to detect version for "%s", check configuration and try again' % cur_host) + self._log_error(f'Failed to detect version for "{cur_host}", check configuration and try again') result += [cur_host + ':No response'] success = False @@ -488,7 +488,7 @@ class XBMCNotifier(Notifier): """ if not sickgear.XBMC_HOST: - self._log_debug(u'No hosts specified, check your settings') + self._log_debug('No hosts specified, check your settings') return False # either update each host, or only attempt to update until one successful result @@ -496,11 +496,11 @@ class XBMCNotifier(Notifier): for host in [x.strip() for x in sickgear.XBMC_HOST.split(',')]: if self._send_update_library(host, show_name): if sickgear.XBMC_UPDATE_ONLYFIRST: - self._log_debug(u'Successfully updated "%s", stopped sending update library commands' % host) + self._log_debug(f'Successfully updated "{host}", stopped sending update library commands') return True else: if sickgear.XBMC_ALWAYS_ON: - self._log_error(u'Failed to detect version for "%s", check configuration and try again' % host) + self._log_error(f'Failed to detect version for "{host}", check configuration and try again') result = result + 1 # needed for the 
'update xbmc' submenu command diff --git a/sickgear/nzbSplitter.py b/sickgear/nzbSplitter.py index 7ac6cfe8..c4334544 100644 --- a/sickgear/nzbSplitter.py +++ b/sickgear/nzbSplitter.py @@ -73,7 +73,7 @@ def _get_season_nzbs(name, url_data, season): try: show_xml = etree.ElementTree(etree.XML(url_data)) except SyntaxError: - logger.log(u'Unable to parse the XML of %s, not splitting it' % name, logger.ERROR) + logger.error(f'Unable to parse the XML of {name}, not splitting it') return {}, '' filename = name.replace('.nzb', '') @@ -86,7 +86,7 @@ def _get_season_nzbs(name, url_data, season): if scene_name_match: show_name, quality_section = scene_name_match.groups() else: - logger.log('%s - Not a valid season pack scene name. If it\'s a valid one, log a bug.' % name, logger.ERROR) + logger.error('%s - Not a valid season pack scene name. If it\'s a valid one, log a bug.' % name) return {}, '' regex = r'(%s[\._]S%02d(?:[E0-9]+)\.[\w\._]+)' % (re.escape(show_name), season) @@ -116,7 +116,7 @@ def _get_season_nzbs(name, url_data, season): if isinstance(ext, string_types) \ and re.search(r'^\.(nzb|r\d{2}|rar|7z|zip|par2|vol\d+|nfo|srt|txt|bat|sh|mkv|mp4|avi|wmv)$', ext, flags=re.I): - logger.log('Unable to split %s into episode nzb\'s' % name, logger.WARNING) + logger.warning('Unable to split %s into episode nzb\'s' % name) return {}, '' if cur_ep not in ep_files: ep_files[cur_ep] = [cur_file] @@ -157,7 +157,7 @@ def _save_nzb(nzb_name, nzb_string): nzb_fh.write(nzb_string) except EnvironmentError as e: - logger.log(u'Unable to save NZB: ' + ex(e), logger.ERROR) + logger.error(f'Unable to save NZB: {ex(e)}') def _strip_ns(element, ns): @@ -178,7 +178,7 @@ def split_result(result): """ resp = helpers.get_url(result.url, failure_monitor=False) if None is resp: - logger.log(u'Unable to load url %s, can\'t download season NZB' % result.url, logger.ERROR) + logger.error(f'Unable to load url {result.url}, can\'t download season NZB') return False # parse the season ep name @@ -186,10 +186,10 @@ def split_result(result): np = NameParser(False, show_obj=result.show_obj) parse_result = np.parse(result.name) except InvalidNameException: - logger.log(u'Unable to parse the filename %s into a valid episode' % result.name, logger.DEBUG) + logger.debug(f'Unable to parse the filename {result.name} into a valid episode') return False except InvalidShowException: - logger.log(u'Unable to parse the filename %s into a valid show' % result.name, logger.DEBUG) + logger.debug(f'Unable to parse the filename {result.name} into a valid show') return False # bust it up @@ -201,35 +201,35 @@ def split_result(result): for new_nzb in separate_nzbs: - logger.log(u'Split out %s from %s' % (new_nzb, result.name), logger.DEBUG) + logger.debug(f'Split out {new_nzb} from {result.name}') # parse the name try: np = NameParser(False, show_obj=result.show_obj) parse_result = np.parse(new_nzb) except InvalidNameException: - logger.log(u"Unable to parse the filename %s into a valid episode" % new_nzb, logger.DEBUG) + logger.debug(f'Unable to parse the filename {new_nzb} into a valid episode') return False except InvalidShowException: - logger.log(u"Unable to parse the filename %s into a valid show" % new_nzb, logger.DEBUG) + logger.debug(f'Unable to parse the filename {new_nzb} into a valid show') return False # make sure the result is sane if (None is not parse_result.season_number and season != parse_result.season_number) \ or (None is parse_result.season_number and 1 != season): - logger.log(u'Found %s inside %s but it doesn\'t seem to 
belong to the same season, ignoring it' - % (new_nzb, result.name), logger.WARNING) + logger.warning(f'Found {new_nzb} inside {result.name} but it doesn\'t seem to belong to the same season,' + f' ignoring it') continue elif 0 == len(parse_result.episode_numbers): - logger.log(u'Found %s inside %s but it doesn\'t seem to be a valid episode NZB, ignoring it' - % (new_nzb, result.name), logger.WARNING) + logger.warning(f'Found {new_nzb} inside {result.name} but it doesn\'t seem to be a valid episode NZB,' + f' ignoring it') continue want_ep = True for ep_no in parse_result.episode_numbers: if not result.show_obj.want_episode(season, ep_no, result.quality): - logger.log(u'Ignoring result %s because we don\'t want an episode that is %s' - % (new_nzb, Quality.qualityStrings[result.quality]), logger.DEBUG) + logger.debug(f'Ignoring result {new_nzb} because we don\'t want an episode that is' + f' {Quality.qualityStrings[result.quality]}') want_ep = False break if not want_ep: diff --git a/sickgear/nzbget.py b/sickgear/nzbget.py index 8d54fad2..703b45bf 100644 --- a/sickgear/nzbget.py +++ b/sickgear/nzbget.py @@ -34,7 +34,7 @@ def test_nzbget(host, use_https, username, password, timeout=300): result = False if not host: msg = 'No NZBGet host found. Please configure it' - logger.log(msg, logger.ERROR) + logger.error(msg) return result, msg, None url = 'http%(scheme)s://%(username)s:%(password)s@%(host)s/xmlrpc' % { @@ -44,24 +44,24 @@ def test_nzbget(host, use_https, username, password, timeout=300): try: msg = 'Success. Connected' if rpc_client.writelog('INFO', 'SickGear connected as a test'): - logger.log(msg, logger.DEBUG) + logger.debug(msg) else: msg += ', but unable to send a message' - logger.log(msg, logger.ERROR) + logger.error(msg) result = True - logger.log(u'NZBGet URL: %s' % url, logger.DEBUG) + logger.debug(f'NZBGet URL: {url}') except moves.http_client.socket.error: msg = 'Please check NZBGet host and port (if it is running). 
NZBGet is not responding to these values' - logger.log(msg, logger.ERROR) + logger.error(msg) except moves.xmlrpc_client.ProtocolError as e: if 'Unauthorized' == e.errmsg: msg = 'NZBGet username or password is incorrect' - logger.log(msg, logger.ERROR) + logger.error(msg) else: msg = 'Protocol Error: %s' % e.errmsg - logger.log(msg, logger.ERROR) + logger.error(msg) return result, msg, rpc_client @@ -114,7 +114,7 @@ def send_nzb(search_result): return result nzbcontent64 = b64encodestring(data, keep_eol=True) - logger.log(u'Sending NZB to NZBGet: %s' % search_result.name) + logger.log(f'Sending NZB to NZBGet: {search_result.name}') try: # Find out if nzbget supports priority (Version 9.0+), old versions beginning with a 0.x will use the old cmd @@ -161,11 +161,11 @@ def send_nzb(search_result): nzbget_prio, False, search_result.url) if nzbget_result: - logger.log(u'NZB sent to NZBGet successfully', logger.DEBUG) + logger.debug('NZB sent to NZBGet successfully') result = True else: - logger.log(u'NZBGet could not add %s.nzb to the queue' % search_result.name, logger.ERROR) + logger.error(f'NZBGet could not add {search_result.name}.nzb to the queue') except (BaseException, Exception): - logger.log(u'Connect Error to NZBGet: could not add %s.nzb to the queue' % search_result.name, logger.ERROR) + logger.error(f'Connect Error to NZBGet: could not add {search_result.name}.nzb to the queue') return result diff --git a/sickgear/postProcessor.py b/sickgear/postProcessor.py index 48501e63..711e0bf6 100644 --- a/sickgear/postProcessor.py +++ b/sickgear/postProcessor.py @@ -111,7 +111,7 @@ class PostProcessor(object): """ logger_msg = re.sub(r'(?i)\.*', '', message) logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) - logger.log(u'%s' % logger_msg, level) + logger.log(f'{logger_msg}', level) self.log += message + '\n' def _check_for_existing_file(self, existing_file): @@ -129,25 +129,24 @@ class PostProcessor(object): """ if not existing_file: - self._log(u'There is no existing file', logger.DEBUG) + self._log('There is no existing file', logger.DEBUG) return PostProcessor.DOESNT_EXIST # if the new file exists, return the appropriate code depending on the size if os.path.isfile(existing_file): - new_file = u'New file %s
.. is ' % self.file_path + new_file = f'New file {self.file_path}
.. is ' if os.path.getsize(self.file_path) == os.path.getsize(existing_file): - self._log(u'%sthe same size as %s' % (new_file, existing_file), logger.DEBUG) + self._log(f'{new_file}the same size as {existing_file}', logger.DEBUG) return PostProcessor.EXISTS_SAME elif os.path.getsize(self.file_path) < os.path.getsize(existing_file): - self._log(u'%ssmaller than %s' % (new_file, existing_file), logger.DEBUG) + self._log(f'{new_file}smaller than {existing_file}', logger.DEBUG) return PostProcessor.EXISTS_LARGER else: - self._log(u'%slarger than %s' % (new_file, existing_file), logger.DEBUG) + self._log(f'{new_file}larger than {existing_file}', logger.DEBUG) return PostProcessor.EXISTS_SMALLER else: - self._log(u'File doesn\'t exist %s' % existing_file, - logger.DEBUG) + self._log(f'File doesn\'t exist {existing_file}', logger.DEBUG) return PostProcessor.DOESNT_EXIST @staticmethod @@ -222,7 +221,7 @@ class PostProcessor(object): file_list = file_list + self.list_associated_files(file_path) if not file_list: - self._log(u'Not deleting anything because there are no files associated with %s' % file_path, logger.DEBUG) + self._log(f'Not deleting anything because there are no files associated with {file_path}', logger.DEBUG) return # delete the file and any other files which we want to delete @@ -234,16 +233,14 @@ class PostProcessor(object): # File is read-only, so make it writeable try: os.chmod(cur_file, stat.S_IWRITE) - self._log(u'Changed read only permissions to writeable to delete file %s' - % cur_file, logger.DEBUG) + self._log(f'Changed read only permissions to writeable to delete file {cur_file}', logger.DEBUG) except (BaseException, Exception): - self._log(u'Cannot change permissions to writeable to delete file: %s' - % cur_file, logger.WARNING) + self._log(f'Cannot change permissions to writeable to delete file: {cur_file}', logger.WARNING) removal_type = helpers.remove_file(cur_file, log_level=logger.DEBUG) if True is not os.path.isfile(cur_file): - self._log(u'%s file %s' % (removal_type, cur_file), logger.DEBUG) + self._log(f'{removal_type} file {cur_file}', logger.DEBUG) # do the library update for synoindex notifiers.NotifierFactory().get('SYNOINDEX').deleteFile(cur_file) @@ -271,7 +268,7 @@ class PostProcessor(object): """ if not action: - self._log(u'Must provide an action for the combined file operation', logger.ERROR) + self._log('Must provide an action for the combined file operation', logger.ERROR) return file_list = [file_path] @@ -281,7 +278,7 @@ class PostProcessor(object): file_list = file_list + self.list_associated_files(file_path, subtitles_only=True) if not file_list: - self._log(u'Not moving anything because there are no files associated with %s' % file_path, logger.DEBUG) + self._log(f'Not moving anything because there are no files associated with {file_path}', logger.DEBUG) return # create base name with file_path (media_file without .extension) @@ -317,7 +314,7 @@ class PostProcessor(object): subs_new_path = os.path.join(new_path, sickgear.SUBTITLES_DIR) dir_exists = helpers.make_dir(subs_new_path) if not dir_exists: - logger.log(u'Unable to create subtitles folder ' + subs_new_path, logger.ERROR) + logger.error(f'Unable to create subtitles folder {subs_new_path}') else: helpers.chmod_as_parent(subs_new_path) new_file_path = os.path.join(subs_new_path, new_file_name) @@ -345,15 +342,16 @@ class PostProcessor(object): :type action_tmpl: """ - def _int_move(cur_file_path, new_file_path, success_tmpl=u' %s to %s'): + def _int_move(cur_file_path, new_file_path, 
success_tmpl=' %s to %s'): try: helpers.move_file(cur_file_path, new_file_path, raise_exceptions=True) helpers.chmod_as_parent(new_file_path) - self._log(u'Moved file from' + (success_tmpl % (cur_file_path, new_file_path)), logger.DEBUG) + self._log(f'Moved file from{(success_tmpl % (cur_file_path, new_file_path))}', + logger.DEBUG) except (IOError, OSError) as e: - self._log(u'Unable to move file %s
.. %s' - % (success_tmpl % (cur_file_path, new_file_path), ex(e)), logger.ERROR) + self._log(f'Unable to move file {success_tmpl % (cur_file_path, new_file_path)}
.. {ex(e)}', + logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, _int_move, @@ -375,15 +373,16 @@ class PostProcessor(object): :type action_tmpl: """ - def _int_copy(cur_file_path, new_file_path, success_tmpl=u' %s to %s'): + def _int_copy(cur_file_path, new_file_path, success_tmpl=' %s to %s'): try: helpers.copy_file(cur_file_path, new_file_path) helpers.chmod_as_parent(new_file_path) - self._log(u'Copied file from' + (success_tmpl % (cur_file_path, new_file_path)), logger.DEBUG) + self._log(f'Copied file from{(success_tmpl % (cur_file_path, new_file_path))}', + logger.DEBUG) except (IOError, OSError) as e: - self._log(u'Unable to copy %s
.. %s' - % (success_tmpl % (cur_file_path, new_file_path), ex(e)), logger.ERROR) + self._log(f'Unable to copy {success_tmpl % (cur_file_path, new_file_path)}
.. {ex(e)}', + logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, _int_copy, @@ -403,15 +402,16 @@ class PostProcessor(object): :type action_tmpl: """ - def _int_hard_link(cur_file_path, new_file_path, success_tmpl=u' %s to %s'): + def _int_hard_link(cur_file_path, new_file_path, success_tmpl=' %s to %s'): try: helpers.hardlink_file(cur_file_path, new_file_path) helpers.chmod_as_parent(new_file_path) - self._log(u'Hard linked file from' + (success_tmpl % (cur_file_path, new_file_path)), logger.DEBUG) + self._log(f'Hard linked file from{(success_tmpl % (cur_file_path, new_file_path))}', + logger.DEBUG) except (IOError, OSError) as e: - self._log(u'Unable to link file %s
.. %s' - % (success_tmpl % (cur_file_path, new_file_path), ex(e)), logger.ERROR) + self._log(f'Unable to link file {success_tmpl % (cur_file_path, new_file_path)}
.. {ex(e)}', + logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, _int_hard_link, @@ -431,16 +431,16 @@ class PostProcessor(object): :type action_tmpl: """ - def _int_move_and_sym_link(cur_file_path, new_file_path, success_tmpl=u' %s to %s'): + def _int_move_and_sym_link(cur_file_path, new_file_path, success_tmpl=' %s to %s'): try: helpers.move_and_symlink_file(cur_file_path, new_file_path) helpers.chmod_as_parent(new_file_path) - self._log(u'Moved then symbolic linked file from' + (success_tmpl % (cur_file_path, new_file_path)), + self._log(f'Moved then symbolic linked file from{(success_tmpl % (cur_file_path, new_file_path))}', logger.DEBUG) except (IOError, OSError) as e: - self._log(u'Unable to link file %s
.. %s' - % (success_tmpl % (cur_file_path, new_file_path), ex(e)), logger.ERROR) + self._log(f'Unable to link file {success_tmpl % (cur_file_path, new_file_path)}
.. {ex(e)}', + logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, _int_move_and_sym_link, @@ -515,9 +515,9 @@ class PostProcessor(object): self.in_history = True to_return = (show_obj, season_number, episode_numbers, quality) if not show_obj: - self._log(u'Unknown show, check availability on ShowList page', logger.DEBUG) + self._log('Unknown show, check availability on ShowList page', logger.DEBUG) break - self._log(u'Found a match in history for %s' % show_obj.name, logger.DEBUG) + self._log(f'Found a match in history for {show_obj.name}', logger.DEBUG) break return to_return @@ -546,7 +546,7 @@ class PostProcessor(object): :rtype: Tuple[None, None, List, None] or Tuple[sickgear.tv.TVShow, int, List[int], int] """ - logger.log(u'Analyzing name ' + repr(name)) + logger.log(f'Analyzing name {repr(name)}') to_return = (None, None, [], None) @@ -556,8 +556,8 @@ class PostProcessor(object): # parse the name to break it into show name, season, and episode np = NameParser(resource, convert=True, show_obj=self.show_obj or show_obj) parse_result = np.parse(name) - self._log(u'Parsed %s
.. from %s' - % (decode_str(str(parse_result), errors='xmlcharrefreplace'), name), logger.DEBUG) + self._log(f'Parsed {decode_str(str(parse_result), errors="xmlcharrefreplace")}
' + f'.. from {name}', logger.DEBUG) if parse_result.is_air_by_date and (None is parse_result.season_number or not parse_result.episode_numbers): season_number = -1 @@ -598,13 +598,16 @@ class PostProcessor(object): self.release_name = helpers.remove_extension(os.path.basename(parse_result.original_name)) else: - logger.log(u'Parse result not sufficient (all following have to be set). will not save release name', - logger.DEBUG) - logger.log(u'Parse result(series_name): ' + str(parse_result.series_name), logger.DEBUG) - logger.log(u'Parse result(season_number): ' + str(parse_result.season_number), logger.DEBUG) - logger.log(u'Parse result(episode_numbers): ' + str(parse_result.episode_numbers), logger.DEBUG) - logger.log(u' or Parse result(air_date): ' + str(parse_result.air_date), logger.DEBUG) - logger.log(u'Parse result(release_group): ' + str(parse_result.release_group), logger.DEBUG) + for cur_msg in ( + 'Parse result not sufficient (all following have to be set). will not save release name', + f'Parse result(series_name): {parse_result.series_name}', + f'Parse result(season_number): {parse_result.season_number}', + f'Parse result(episode_numbers): {parse_result.episode_numbers}', + f' or Parse result(air_date): {parse_result.air_date}', + f'Parse result(release_group): {parse_result.release_group}' + ): + logger.debug(cur_msg) + def _find_info(self, history_only=False): """ @@ -632,7 +635,7 @@ class PostProcessor(object): lambda: self._analyze_name(self.file_path), # try to analyze the dir + file name together as one name - lambda: self._analyze_name(self.folder_name + u' ' + self.file_name), + lambda: self._analyze_name(f'{self.folder_name} {self.file_name}'), # try to analyze file name with previously parsed show_obj lambda: self._analyze_name(self.file_name, show_obj=show_obj, rel_grp=rel_grp)], @@ -645,7 +648,7 @@ class PostProcessor(object): try: (try_show_obj, try_season, try_episodes, try_quality) = cur_try() except (InvalidNameException, InvalidShowException) as e: - logger.log(u'Unable to parse, skipping: ' + ex(e), logger.DEBUG) + logger.debug(f'Unable to parse, skipping: {ex(e)}') continue if not try_show_obj: @@ -667,8 +670,8 @@ class PostProcessor(object): # for air-by-date shows we need to look up the season/episode from database if -1 == season_number and show_obj and episode_numbers: - self._log(u'Looks like this is an air-by-date or sports show,' - u' attempting to convert the date to season/episode', logger.DEBUG) + self._log('Looks like this is an air-by-date or sports show,' + ' attempting to convert the date to season/episode', logger.DEBUG) airdate = episode_numbers[0].toordinal() my_db = db.DBConnection() sql_result = my_db.select( @@ -681,8 +684,8 @@ class PostProcessor(object): season_number = int(sql_result[0][0]) episode_numbers = [int(sql_result[0][1])] else: - self._log(u'Unable to find episode with date %s for show %s, skipping' % - (episode_numbers[0], show_obj.tvid_prodid), logger.DEBUG) + self._log(f'Unable to find episode with date {episode_numbers[0]} for show {show_obj.tvid_prodid},' + f' skipping', logger.DEBUG) # don't leave dates in the episode list if we can't convert them to real episode numbers episode_numbers = [] continue @@ -697,8 +700,8 @@ class PostProcessor(object): [show_obj.tvid, show_obj.prodid]) if 1 == int(num_seasons_sql_result[0][0]) and None is season_number: self._log( - u'No season number found, but this show appears to only have 1 season,' - u' setting season number to 1...', logger.DEBUG) + 'No season number found, but this 
show appears to only have 1 season,' + ' setting season number to 1...', logger.DEBUG) season_number = 1 if show_obj and season_number and episode_numbers: @@ -731,13 +734,13 @@ class PostProcessor(object): for cur_episode_number in episode_numbers: cur_episode_number = int(cur_episode_number) - self._log(u'Retrieving episode object for %sx%s' % (season_number, cur_episode_number), logger.DEBUG) + self._log(f'Retrieving episode object for {season_number}x{cur_episode_number}', logger.DEBUG) # now that we've figured out which episode this file is just load it manually try: ep_obj = show_obj.get_episode(season_number, cur_episode_number) except exceptions_helper.EpisodeNotFoundException as e: - self._log(u'Unable to create episode: ' + ex(e), logger.DEBUG) + self._log(f'Unable to create episode: {ex(e)}', logger.DEBUG) raise exceptions_helper.PostProcessingFailed() # associate all the episodes together under a single root episode @@ -764,9 +767,8 @@ class PostProcessor(object): if ep_obj.status in common.Quality.SNATCHED_ANY: old_status, ep_quality = common.Quality.split_composite_status(ep_obj.status) if common.Quality.UNKNOWN != ep_quality: - self._log( - u'Using "%s" quality from the old status' % common.Quality.qualityStrings[ep_quality], - logger.DEBUG) + self._log(f'Using "{common.Quality.qualityStrings[ep_quality]}" quality from the old status', + logger.DEBUG) return ep_quality # search all possible names for our new quality, in case the file or dir doesn't have it @@ -780,26 +782,25 @@ class PostProcessor(object): continue ep_quality = common.Quality.name_quality(cur_name, ep_obj.show_obj.is_anime) - quality_log = u' "%s" quality parsed from the %s %s'\ - % (common.Quality.qualityStrings[ep_quality], thing, cur_name) + quality_log = f' "{common.Quality.qualityStrings[ep_quality]}" quality parsed from the {thing} {cur_name}' # if we find a good one then use it if common.Quality.UNKNOWN != ep_quality: - self._log(u'Using' + quality_log, logger.DEBUG) + self._log(f'Using{quality_log}', logger.DEBUG) return ep_quality else: - self._log(u'Found' + quality_log, logger.DEBUG) + self._log(f'Found{quality_log}', logger.DEBUG) ep_quality = common.Quality.file_quality(self.file_path) if common.Quality.UNKNOWN != ep_quality: - self._log(u'Using "%s" quality parsed from the metadata file content of %s' - % (common.Quality.qualityStrings[ep_quality], self.file_name), logger.DEBUG) + self._log(f'Using "{common.Quality.qualityStrings[ep_quality]}" quality parsed' + f' from the metadata file content of {self.file_name}', logger.DEBUG) return ep_quality # Try guessing quality from the file name ep_quality = common.Quality.assume_quality(self.file_name) - self._log(u'Using guessed "%s" quality from the file name %s' - % (common.Quality.qualityStrings[ep_quality], self.file_name), logger.DEBUG) + self._log(f'Using guessed "{common.Quality.qualityStrings[ep_quality]}" quality' + f' from the file name {self.file_name}', logger.DEBUG) return ep_quality @@ -822,7 +823,7 @@ class PostProcessor(object): try: script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", script_name) if piece.strip()] script_cmd[0] = os.path.abspath(script_cmd[0]) - self._log(u'Absolute path to script: ' + script_cmd[0], logger.DEBUG) + self._log(f'Absolute path to script: {script_cmd[0]}', logger.DEBUG) script_cmd += [ep_obj.location, self.file_path] @@ -832,7 +833,7 @@ class PostProcessor(object): str(ep_obj.episode), str(ep_obj.airdate)] - self._log(u'Executing command ' + str(script_cmd)) + self._log(f'Executing 
command {script_cmd}') except (BaseException, Exception) as e: self._log('Error creating extra script command: %s' % ex(e), logger.ERROR) return @@ -843,10 +844,10 @@ class PostProcessor(object): self._log('Script result: %s' % output, logger.DEBUG) except OSError as e: - self._log(u'Unable to run extra_script: ' + ex(e), logger.ERROR) + self._log(f'Unable to run extra_script: {ex(e)}', logger.ERROR) except (BaseException, Exception) as e: - self._log(u'Unable to run extra_script: ' + ex(e), logger.ERROR) + self._log(f'Unable to run extra_script: {ex(e)}', logger.ERROR) def _run_extra_scripts(self, ep_obj): """ @@ -881,48 +882,48 @@ class PostProcessor(object): if not existing_show_path and not sickgear.CREATE_MISSING_SHOW_DIRS: # Show location does not exist, and cannot be created, marking it unsafe to proceed - self._log(u'.. marking it unsafe to proceed because show location does not exist', logger.DEBUG) + self._log('.. marking it unsafe to proceed because show location does not exist', logger.DEBUG) return False # if SickGear snatched this then assume it's safe if ep_obj.status in common.Quality.SNATCHED_ANY: - self._log(u'SickGear snatched this episode, marking it safe to replace', logger.DEBUG) + self._log('SickGear snatched this episode, marking it safe to replace', logger.DEBUG) return True old_ep_status, old_ep_quality = common.Quality.split_composite_status(ep_obj.status) # if old episode is not downloaded/archived then it's safe if common.DOWNLOADED != old_ep_status and common.ARCHIVED != old_ep_status: - self._log(u'Existing episode status is not downloaded/archived, marking it safe to replace', logger.DEBUG) + self._log('Existing episode status is not downloaded/archived, marking it safe to replace', logger.DEBUG) return True if common.ARCHIVED == old_ep_status and common.Quality.NONE == old_ep_quality: - self._log(u'Marking it unsafe to replace because the existing episode status is archived', logger.DEBUG) + self._log('Marking it unsafe to replace because the existing episode status is archived', logger.DEBUG) return False # Status downloaded. Quality/ size checks # if manual post process option is set to force_replace then it's safe if self.force_replace: - self._log(u'Force replace existing episode option is enabled, marking it safe to replace', logger.DEBUG) + self._log('Force replace existing episode option is enabled, marking it safe to replace', logger.DEBUG) return True # if the file processed is higher quality than the existing episode then it's safe if new_ep_quality > old_ep_quality: if common.Quality.UNKNOWN != new_ep_quality: - self._log(u'Existing episode status is not snatched but the episode to process appears to be better' - u' quality than existing episode, marking it safe to replace', logger.DEBUG) + self._log('Existing episode status is not snatched but the episode to process appears to be better' + ' quality than existing episode, marking it safe to replace', logger.DEBUG) return True else: - self._log(u'Marking it unsafe to replace because an existing episode exists in the database and' - u' the episode to process has unknown quality', logger.DEBUG) + self._log('Marking it unsafe to replace because an existing episode exists in the database and' + ' the episode to process has unknown quality', logger.DEBUG) return False existing_file_status = self._check_for_existing_file(ep_obj.location) if PostProcessor.DOESNT_EXIST == existing_file_status \ and (existing_show_path or sickgear.CREATE_MISSING_SHOW_DIRS): - self._log(u'.. 
there is no file to replace, marking it safe to continue', logger.DEBUG) + self._log('.. there is no file to replace, marking it safe to continue', logger.DEBUG) return True # if there's an existing downloaded file with same quality, check filesize to decide @@ -946,48 +947,47 @@ class PostProcessor(object): npr.is_anime, check_is_repack=True) if new_proper_level > cur_proper_level and \ (not is_repack or npr.release_group == ep_obj.release_group): - self._log(u'Proper or repack with same quality, marking it safe to replace', logger.DEBUG) + self._log('Proper or repack with same quality, marking it safe to replace', logger.DEBUG) return True - self._log(u'An episode exists in the database with the same quality as the episode to process', - logger.DEBUG) + self._log('An episode exists in the database with the same quality as the episode to process', logger.DEBUG) - self._log(u'Checking size of existing file ' + ep_obj.location, logger.DEBUG) + self._log(f'Checking size of existing file {ep_obj.location}', logger.DEBUG) if PostProcessor.EXISTS_SMALLER == existing_file_status: # File exists and new file is larger, marking it safe to replace - self._log(u'.. the existing smaller file will be replaced', logger.DEBUG) + self._log('.. the existing smaller file will be replaced', logger.DEBUG) return True elif PostProcessor.EXISTS_LARGER == existing_file_status: # File exists and new file is smaller, marking it unsafe to replace - self._log(u'.. marking it unsafe to replace the existing larger file', logger.DEBUG) + self._log('.. marking it unsafe to replace the existing larger file', logger.DEBUG) return False elif PostProcessor.EXISTS_SAME == existing_file_status: # File exists and new file is same size, marking it unsafe to replace - self._log(u'.. marking it unsafe to replace the existing same size file', logger.DEBUG) + self._log('.. marking it unsafe to replace the existing same size file', logger.DEBUG) return False else: - self._log(u'Unknown file status for: %s This should never happen, please log this as a bug.' 
- % ep_obj.location, logger.ERROR) + self._log(f'Unknown file status for: {ep_obj.location}' + f' This should never happen, please log this as a bug.', logger.ERROR) return False # if there's an existing file with better quality if old_ep_quality > new_ep_quality and old_ep_quality != common.Quality.UNKNOWN: # Episode already exists in database and processed episode has lower quality, marking it unsafe to replace - self._log(u'Marking it unsafe to replace the episode that already exists in database with a file of lower' - u' quality', logger.DEBUG) + self._log('Marking it unsafe to replace the episode that already exists in database with a file of lower' + ' quality', logger.DEBUG) return False if self.in_history: - self._log(u'SickGear snatched this episode, marking it safe to replace', logger.DEBUG) + self._log('SickGear snatched this episode, marking it safe to replace', logger.DEBUG) return True # None of the conditions were met, marking it unsafe to replace - self._log(u'Marking it unsafe to replace because no positive condition is met, you may force replace but it' - u' would be better to examine the files', logger.DEBUG) + self._log('Marking it unsafe to replace because no positive condition is met, you may force replace but it' + ' would be better to examine the files', logger.DEBUG) return False def _change_ep_objs(self, show_obj, season_number, episode_numbers, quality): @@ -998,7 +998,7 @@ class PostProcessor(object): for cur_ep_obj in [ep_obj] + ep_obj.related_ep_obj: with cur_ep_obj.lock: if self.release_name: - self._log(u'Found release name ' + self.release_name, logger.DEBUG) + self._log(f'Found release name {self.release_name}', logger.DEBUG) cur_ep_obj.release_name = self.release_name or '' @@ -1044,7 +1044,7 @@ class PostProcessor(object): self._log('Successfully processed.', logger.MESSAGE) else: - self._log('Can\'t figure out what show/episode to process', logger.WARNING) + self._log("Can't figure out what show/episode to process", logger.WARNING) raise exceptions_helper.PostProcessingFailed() def process(self): @@ -1054,16 +1054,16 @@ class PostProcessor(object): :rtype: bool """ - self._log(u'Processing... %s%s' % (os.path.relpath(self.file_path, self.folder_path), - (u'
.. from nzb %s' % self.nzb_name, u'')[None is self.nzb_name])) + self._log(f'Processing... {os.path.relpath(self.file_path, self.folder_path)}' + f'{(f"
.. from nzb {self.nzb_name}", "")[None is self.nzb_name]}') if os.path.isdir(self.file_path): - self._log(u'Expecting file %s
.. is actually a directory, skipping' % self.file_path) + self._log(f'Expecting file {self.file_path}
.. is actually a directory, skipping') return False for ignore_file in self.IGNORED_FILESTRINGS: if ignore_file in self.file_path: - self._log(u'File %s
.. is ignored type, skipping' % self.file_path) + self._log(f'File {self.file_path}
.. is ignored type, skipping') return False # reset per-file stuff @@ -1075,10 +1075,10 @@ class PostProcessor(object): # if we don't have it then give up if not show_obj: - self._log(u'Must add show to SickGear before trying to post process an episode', logger.WARNING) + self._log('Must add show to SickGear before trying to post process an episode', logger.WARNING) raise exceptions_helper.PostProcessingFailed() elif None is season_number or not episode_numbers: - self._log(u'Quitting this post process, could not determine what episode this is', logger.DEBUG) + self._log('Quitting this post process, could not determine what episode this is', logger.DEBUG) return False # retrieve/create the corresponding TVEpisode objects @@ -1089,12 +1089,12 @@ class PostProcessor(object): new_ep_quality = self._get_quality(ep_obj) else: new_ep_quality = quality - self._log(u'Using "%s" quality' % common.Quality.qualityStrings[new_ep_quality], logger.DEBUG) + self._log(f'Using "{common.Quality.qualityStrings[new_ep_quality]}" quality', logger.DEBUG) # see if it's safe to replace existing episode (is download snatched, PROPER, better quality) if not self._safe_replace(ep_obj, new_ep_quality): # if it's not safe to replace, stop here - self._log(u'Quitting this post process', logger.DEBUG) + self._log('Quitting this post process', logger.DEBUG) return False # delete the existing file (and company) @@ -1107,7 +1107,7 @@ class PostProcessor(object): helpers.delete_empty_folders(os.path.dirname(cur_ep_obj.location), keep_dir=ep_obj.show_obj.location) except (OSError, IOError): - raise exceptions_helper.PostProcessingFailed(u'Unable to delete existing files') + raise exceptions_helper.PostProcessingFailed('Unable to delete existing files') # set the status of the episodes # for cur_ep_obj in [ep_obj] + ep_obj.related_ep_obj: @@ -1115,14 +1115,14 @@ class PostProcessor(object): # if the show directory doesn't exist then make it if allowed if not os.path.isdir(ep_obj.show_obj.location) and sickgear.CREATE_MISSING_SHOW_DIRS: - self._log(u'Show directory does not exist, creating it', logger.DEBUG) + self._log('Show directory does not exist, creating it', logger.DEBUG) try: os.mkdir(ep_obj.show_obj.location) # do the library update for synoindex notifiers.NotifierFactory().get('SYNOINDEX').addFolder(ep_obj.show_obj.location) except (OSError, IOError): - raise exceptions_helper.PostProcessingFailed(u'Unable to create show directory: ' - + ep_obj.show_obj.location) + raise exceptions_helper.PostProcessingFailed(f'Unable to create show directory:' + f' {ep_obj.show_obj.location}') # get metadata for the show (but not episode because it hasn't been fully processed) ep_obj.show_obj.write_metadata(True) @@ -1132,7 +1132,7 @@ class PostProcessor(object): # Just want to keep this consistent for failed handling right now release_name = show_name_helpers.determine_release_name(self.folder_path, self.nzb_name) if None is release_name: - self._log(u'No snatched release found in history', logger.WARNING) + self._log('No snatched release found in history', logger.WARNING) elif sickgear.USE_FAILED_DOWNLOADS: failed_history.remove_failed(release_name) @@ -1144,13 +1144,13 @@ class PostProcessor(object): except exceptions_helper.ShowDirNotFoundException: raise exceptions_helper.PostProcessingFailed( - u'Unable to post process an episode because the show dir does not exist, quitting') + 'Unable to post process an episode because the show dir does not exist, quitting') - self._log(u'Destination folder for this episode is ' + dest_path, 
logger.DEBUG) + self._log(f'Destination folder for this episode is {dest_path}', logger.DEBUG) # create any folders we need if not helpers.make_path(dest_path, syno=True): - raise exceptions_helper.PostProcessingFailed(u'Unable to create destination folder: ' + dest_path) + raise exceptions_helper.PostProcessingFailed(f'Unable to create destination folder: {dest_path}') # figure out the base name of the resulting episode file if sickgear.RENAME_EPISODES: @@ -1174,7 +1174,7 @@ class PostProcessor(object): while not stop_event.is_set(): stop_event.wait(60) webh('.') - webh(u'\n') + webh('\n') keepalive_stop = threading.Event() keepalive = threading.Thread(target=keep_alive, args=(self.webhandler, keepalive_stop)) @@ -1185,7 +1185,7 @@ class PostProcessor(object): 'new_base_name': new_base_name, 'associated_files': sickgear.MOVE_ASSOCIATED_FILES} args_cpmv = {'subtitles': sickgear.USE_SUBTITLES and ep_obj.show_obj.subtitles, - 'action_tmpl': u' %s
.. to %s'} + 'action_tmpl': ' %s
.. to %s'} args_cpmv.update(args_link) if self.webhandler: self.webhandler('Processing method is "%s"' % self.process_method) @@ -1199,10 +1199,10 @@ class PostProcessor(object): elif 'symlink' == self.process_method: self._move_and_symlink(**args_link) else: - logger.log(u'Unknown process method: ' + str(self.process_method), logger.ERROR) - raise exceptions_helper.PostProcessingFailed(u'Unable to move the files to the new location') + logger.error(f'Unknown process method: {self.process_method}') + raise exceptions_helper.PostProcessingFailed('Unable to move the files to the new location') except (OSError, IOError): - raise exceptions_helper.PostProcessingFailed(u'Unable to move the files to the new location') + raise exceptions_helper.PostProcessingFailed('Unable to move the files to the new location') finally: if self.webhandler: # stop the keep_alive diff --git a/sickgear/processTV.py b/sickgear/processTV.py index 78fff9fd..f6a0a5bb 100644 --- a/sickgear/processTV.py +++ b/sickgear/processTV.py @@ -70,7 +70,7 @@ class ProcessTVShow(object): @property def result(self, pre=True): # type: (bool) -> AnyStr - return (('
', u'\n')[pre]).join(self._output) + return (('
', '\n')[pre]).join(self._output) def _buffer(self, text=None): if None is not text: @@ -78,7 +78,7 @@ class ProcessTVShow(object): if self.webhandler: logger_msg = re.sub(r'(?i)', '\n', text) logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) - self.webhandler('%s%s' % (logger_msg, u'\n')) + self.webhandler('%s%s' % (logger_msg, '\n')) def _log_helper(self, message, log_level=logger.DEBUG): """ @@ -90,7 +90,7 @@ class ProcessTVShow(object): """ logger_msg = re.sub(r'(?i)\.*', '', message) logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) - logger.log(u'%s' % logger_msg, log_level) + logger.log(f'{logger_msg}', log_level) self._buffer(message) return @@ -136,14 +136,14 @@ class ProcessTVShow(object): try: shutil.rmtree(folder) except (OSError, IOError) as e: - logger.log(u'Warning: unable to delete folder: %s: %s' % (folder, ex(e)), logger.WARNING) + logger.warning(f'Warning: unable to delete folder: {folder}: {ex(e)}') return False if os.path.isdir(folder): - logger.log(u'Warning: unable to delete folder: %s' % folder, logger.WARNING) + logger.warning(f'Warning: unable to delete folder: {folder}') return False - self._log_helper(u'Deleted folder ' + folder, logger.MESSAGE) + self._log_helper(f'Deleted folder {folder}', logger.MESSAGE) return True def _delete_files(self, process_path, notwanted_files, force=False): @@ -170,18 +170,18 @@ class ProcessTVShow(object): file_attribute = os.stat(cur_file_path)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable - self._log_helper(u'Changing ReadOnly flag for file ' + cur_file) + self._log_helper(f'Changing ReadOnly flag for file {cur_file}') try: os.chmod(cur_file_path, stat.S_IWRITE) except OSError as e: - self._log_helper(u'Cannot change permissions of %s: %s' % (cur_file_path, ex(e))) + self._log_helper(f'Cannot change permissions of {cur_file_path}: {ex(e)}') removal_type = helpers.remove_file(cur_file_path) if os.path.isfile(cur_file_path): result = False else: - self._log_helper(u'%s file %s' % (removal_type, cur_file)) + self._log_helper(f'{removal_type} file {cur_file}') return result @@ -209,7 +209,7 @@ class ProcessTVShow(object): show_obj = helpers.find_show_by_id({int(sql_result[-1]['indexer']): int(sql_result[-1]['showid'])}, check_multishow=True) if hasattr(show_obj, 'name'): - logger.log('Found Show: %s in snatch history for: %s' % (show_obj.name, name), logger.DEBUG) + logger.debug('Found Show: %s in snatch history for: %s' % (show_obj.name, name)) except MultipleShowObjectsException: show_obj = None return show_obj @@ -319,19 +319,19 @@ class ProcessTVShow(object): elif dir_name and sickgear.TV_DOWNLOAD_DIR and os.path.isdir(sickgear.TV_DOWNLOAD_DIR)\ and os.path.normpath(dir_name) != os.path.normpath(sickgear.TV_DOWNLOAD_DIR): dir_name = os.path.join(sickgear.TV_DOWNLOAD_DIR, os.path.abspath(dir_name).split(os.path.sep)[-1]) - self._log_helper(u'SickGear PP Config, completed TV downloads folder: ' + sickgear.TV_DOWNLOAD_DIR) + self._log_helper(f'SickGear PP Config, completed TV downloads folder: {sickgear.TV_DOWNLOAD_DIR}') if dir_name: - self._log_helper(u'Checking folder... ' + dir_name) + self._log_helper(f'Checking folder... {dir_name}') # if we didn't find a real directory then process "failed" or just quit if not dir_name or not os.path.isdir(dir_name): if nzb_name and failed: self._process_failed(dir_name, nzb_name, show_obj=show_obj) else: - self._log_helper(u'Unable to figure out what folder to process. 
' + - u'If your downloader and SickGear aren\'t on the same PC then make sure ' + - u'you fill out your completed TV download folder in the PP config.') + self._log_helper('Unable to figure out what folder to process. ' + 'If your downloader and SickGear aren\'t on the same PC then make sure ' + 'you fill out your completed TV download folder in the PP config.') return self.result parent = self.find_parent(dir_name) @@ -352,13 +352,13 @@ class ProcessTVShow(object): path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type) if sickgear.POSTPONE_IF_SYNC_FILES and any(filter(helpers.is_sync_file, files)): - self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) + self._log_helper('Found temporary sync files, skipping post process', logger.ERROR) return self.result if not process_method: process_method = sickgear.PROCESS_METHOD - self._log_helper(u'Processing folder... %s' % path) + self._log_helper(f'Processing folder... {path}') work_files = [] joined = self.join(path) @@ -380,13 +380,13 @@ class ProcessTVShow(object): work_files += [os.path.join(path, item) for item in rar_content] if 0 < len(files): - self._log_helper(u'Process file%s: %s' % (helpers.maybe_plural(files), str(files))) + self._log_helper(f'Process file{helpers.maybe_plural(files)}: {str(files)}') if 0 < len(video_files): - self._log_helper(u'Process video file%s: %s' % (helpers.maybe_plural(video_files), str(video_files))) + self._log_helper(f'Process video file{helpers.maybe_plural(video_files)}: {str(video_files)}') if 0 < len(rar_content): - self._log_helper(u'Process rar content: ' + str(rar_content)) + self._log_helper(f'Process rar content: {rar_content}') if 0 < len(video_in_rar): - self._log_helper(u'Process video%s in rar: %s' % (helpers.maybe_plural(video_in_rar), str(video_in_rar))) + self._log_helper(f'Process video{helpers.maybe_plural(video_in_rar)} in rar: {str(video_in_rar)}') # If nzb_name is set and there's more than one videofile in the folder, files will be lost (overwritten). 
nzb_name_original = nzb_name @@ -425,8 +425,7 @@ class ProcessTVShow(object): force, force_replace, use_trash=cleanup, show_obj=show_obj) except OSError as e: - logger.log('Batch skipped, %s%s' % - (ex(e), e.filename and (' (file %s)' % e.filename) or ''), logger.WARNING) + logger.warning('Batch skipped, %s%s' % (ex(e), e.filename and (' (file %s)' % e.filename) or '')) # Process video files in TV subdirectories for directory in [x for x in dirs if self._validate_dir( @@ -438,7 +437,7 @@ class ProcessTVShow(object): for walk_path, walk_dir, files in os.walk(os.path.join(path, directory), topdown=False): if sickgear.POSTPONE_IF_SYNC_FILES and any(filter(helpers.is_sync_file, files)): - self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) + self._log_helper('Found temporary sync files, skipping post process', logger.ERROR) return self.result parent = self.find_parent(walk_path) @@ -493,8 +492,7 @@ class ProcessTVShow(object): self.check_video_filenames(walk_dir, video_pick))) except OSError as e: - logger.log('Batch skipped, %s%s' % - (ex(e), e.filename and (' (file %s)' % e.filename) or ''), logger.WARNING) + logger.warning(f'Batch skipped, {ex(e)}{e.filename and (" (file %s)" % e.filename) or ""}') if process_method in ('hardlink', 'symlink') and video_in_rar: self._delete_files(walk_path, rar_content) @@ -526,12 +524,13 @@ class ProcessTVShow(object): if self.any_vid_processed: if not self.files_failed: - _bottom_line(u'Successfully processed.', logger.MESSAGE) + _bottom_line('Successfully processed.', logger.MESSAGE) else: - _bottom_line(u'Successfully processed at least one video file%s.' % - (', others were skipped', ' and skipped another')[1 == self.files_failed], logger.MESSAGE) + _bottom_line(f'Successfully processed at least one video file' + f'{(", others were skipped", " and skipped another")[1 == self.files_failed]}.', + logger.MESSAGE) else: - _bottom_line(u'Failed! Did not process any files.', logger.WARNING) + _bottom_line('Failed! 
Did not process any files.', logger.WARNING) return self.result @@ -599,16 +598,16 @@ class ProcessTVShow(object): :return: success :rtype: bool """ - self._log_helper(u'Processing sub dir: ' + dir_name) + self._log_helper(f'Processing sub dir: {dir_name}') if os.path.basename(dir_name).startswith('_FAILED_'): - self._log_helper(u'The directory name indicates it failed to extract.') + self._log_helper('The directory name indicates it failed to extract.') failed = True elif os.path.basename(dir_name).startswith('_UNDERSIZED_'): - self._log_helper(u'The directory name indicates that it was previously rejected for being undersized.') + self._log_helper('The directory name indicates that it was previously rejected for being undersized.') failed = True elif os.path.basename(dir_name).upper().startswith('_UNPACK'): - self._log_helper(u'The directory name indicates that this release is in the process of being unpacked.') + self._log_helper('The directory name indicates that this release is in the process of being unpacked.') return False if failed: @@ -616,7 +615,7 @@ class ProcessTVShow(object): return False if helpers.is_hidden_folder(dir_name): - self._log_helper(u'Ignoring hidden folder: ' + dir_name) + self._log_helper(f'Ignoring hidden folder: {dir_name}') return False # make sure the directory isn't inside a show directory @@ -626,9 +625,7 @@ class ProcessTVShow(object): for cur_result in sql_result: if dir_name.lower().startswith(os.path.realpath(cur_result['location']).lower() + os.sep) \ or dir_name.lower() == os.path.realpath(cur_result['location']).lower(): - self._log_helper( - u'Found an episode that has already been moved to its show dir, skipping', - logger.ERROR) + self._log_helper('Found an episode that has already been moved to its show dir, skipping', logger.ERROR) return False # Get the videofile list for the next checks @@ -686,16 +683,16 @@ class ProcessTVShow(object): if sickgear.UNPACK and rar_files: - self._log_helper(u'Packed releases detected: ' + str(rar_files)) + self._log_helper(f'Packed releases detected: {rar_files}') for archive in rar_files: - self._log_helper(u'Unpacking archive: ' + archive) + self._log_helper(f'Unpacking archive: {archive}') try: rar_handle = rarfile.RarFile(os.path.join(path, archive)) except (BaseException, Exception): - self._log_helper(u'Failed to open archive: %s' % archive, logger.ERROR) + self._log_helper(f'Failed to open archive: {archive}', logger.ERROR) self._set_process_success(False) continue try: @@ -704,8 +701,7 @@ class ProcessTVShow(object): for file_in_archive in [os.path.basename(x.filename) for x in rar_handle.infolist() if not x.is_dir()]: if self._already_postprocessed(path, file_in_archive, force): - self._log_helper( - u'Archive file already processed, extraction skipped: ' + file_in_archive) + self._log_helper(f'Archive file already processed, extraction skipped: {file_in_archive}') skip_file = True break @@ -719,14 +715,14 @@ class ProcessTVShow(object): renamed = self.cleanup_names(path, rar_content) cur_unpacked = rar_content if not renamed else \ (list(set(rar_content) - set(iterkeys(renamed))) + list(renamed.values())) - self._log_helper(u'Unpacked content: [u\'%s\']' % '\', u\''.join(map(text_type, cur_unpacked))) + self._log_helper('Unpacked content: ["%s"]' % '", "'.join(map(text_type, cur_unpacked))) unpacked_files += cur_unpacked except (rarfile.PasswordRequired, rarfile.RarWrongPassword): - self._log_helper(u'Failed to unpack archive PasswordRequired: %s' % archive, logger.ERROR) + self._log_helper(f'Failed 
to unpack archive PasswordRequired: {archive}', logger.ERROR) self._set_process_success(False) self.fail_detected = True except (BaseException, Exception): - self._log_helper(u'Failed to unpack archive: %s' % archive, logger.ERROR) + self._log_helper(f'Failed to unpack archive: {archive}', logger.ERROR) self._set_process_success(False) finally: rar_handle.close() @@ -738,11 +734,11 @@ class ProcessTVShow(object): try: rar_handle = rarfile.RarFile(os.path.join(path, archive)) except (BaseException, Exception): - self._log_helper(u'Failed to open archive: %s' % archive, logger.ERROR) + self._log_helper(f'Failed to open archive: {archive}', logger.ERROR) continue try: if rar_handle.needs_password(): - self._log_helper(u'Failed to unpack archive PasswordRequired: %s' % archive, logger.ERROR) + self._log_helper(f'Failed to unpack archive PasswordRequired: {archive}', logger.ERROR) self._set_process_success(False) self.failure_detected = True rar_handle.close() @@ -813,7 +809,7 @@ class ProcessTVShow(object): is_renamed[os.path.relpath(file_path, directory)] = \ os.path.relpath(new_filename + file_extension, directory) except OSError as _e: - logger.log('Error unable to rename file "%s" because %s' % (cur_filename, ex(_e)), logger.ERROR) + logger.error('Error unable to rename file "%s" because %s' % (cur_filename, ex(_e))) elif helpers.has_media_ext(cur_filename) and \ None is not garbage_name.search(file_name) and None is not media_pattern.search(base_name): _num_videos += 1 @@ -836,7 +832,7 @@ class ProcessTVShow(object): os.rename(old_name, new_name) is_renamed[os.path.relpath(old_name, directory)] = os.path.relpath(new_name, directory) except OSError as e: - logger.log('Error unable to rename file "%s" because %s' % (old_name, ex(e)), logger.ERROR) + logger.error('Error unable to rename file "%s" because %s' % (old_name, ex(e))) return is_renamed @@ -876,7 +872,7 @@ class ProcessTVShow(object): try: os.rename(base_filepath, outfile) except OSError: - logger.log('Error unable to rename file %s' % base_filepath, logger.ERROR) + logger.error('Error unable to rename file %s' % base_filepath) return result chunk_set.append(outfile) chunk_set.sort() @@ -957,8 +953,8 @@ class ProcessTVShow(object): my_db = db.DBConnection() sql_result = my_db.select('SELECT * FROM tv_episodes WHERE release_name = ?', [dir_name]) if sql_result: - self._log_helper(u'Found a release directory %s that has already been processed,
.. skipping: %s'
-                             % (showlink, dir_name))
+            self._log_helper(f'Found a release directory {showlink} that has already been processed,&#x0D;'
+                             f'.. skipping: {dir_name}')
 
             if ep_detail_sql:
                 reset_status(parse_result.show_obj.tvid,
                              parse_result.show_obj.prodid,
@@ -972,8 +968,8 @@ class ProcessTVShow(object):
             sql_result = my_db.select(
                 'SELECT * FROM tv_episodes WHERE release_name = ?', [videofile.rpartition('.')[0]])
             if sql_result:
-                self._log_helper(u'Found a video, but that release %s was already processed,&#x0D;.. skipping: %s'
-                                 % (showlink, videofile))
+                self._log_helper(f'Found a video, but that release {showlink} was already processed,&#x0D;'
+                                 f'.. skipping: {videofile}')
 
                 if ep_detail_sql:
                     reset_status(parse_result.show_obj.tvid,
                                  parse_result.show_obj.prodid,
@@ -991,10 +987,10 @@ class ProcessTVShow(object):
                 + ' and tv_episodes.status IN (%s)' % ','.join([str(x) for x in common.Quality.DOWNLOADED])\
                 + ' and history.resource LIKE ?'
 
-            sql_result = my_db.select(search_sql, [u'%' + videofile])
+            sql_result = my_db.select(search_sql, [f'%{videofile}'])
             if sql_result:
-                self._log_helper(u'Found a video, but the episode %s is already processed,&#x0D;.. skipping: %s'
-                                 % (showlink, videofile))
+                self._log_helper(f'Found a video, but the episode {showlink} is already processed,&#x0D;'
+                                 f'.. skipping: {videofile}')
 
                 if ep_detail_sql:
                     reset_status(parse_result.show_obj.tvid,
                                  parse_result.show_obj.prodid,
@@ -1051,7 +1047,7 @@ class ProcessTVShow(object):
                 process_fail_message = ''
             except exceptions_helper.PostProcessingFailed:
                 file_success = False
-                process_fail_message = '&#x0D;.. Post Processing Failed'
+                process_fail_message = '&#x0D;
.. Post Processing Failed' self._set_process_success(file_success) @@ -1059,13 +1055,11 @@ class ProcessTVShow(object): self._buffer(processor.log.strip('\n')) if file_success: - self._log_helper(u'Successfully processed ' + cur_video_file, logger.MESSAGE) + self._log_helper(f'Successfully processed {cur_video_file}', logger.MESSAGE) elif self.any_vid_processed: - self._log_helper(u'Warning fail for %s%s' % (cur_video_file_path, process_fail_message), - logger.WARNING) + self._log_helper(f'Warning fail for {cur_video_file_path}{process_fail_message}', logger.WARNING) else: - self._log_helper(u'Did not use file %s%s' % (cur_video_file_path, process_fail_message), - logger.WARNING) + self._log_helper(f'Did not use file {cur_video_file_path}{process_fail_message}', logger.WARNING) @staticmethod def _get_path_dir_files(dir_name, nzb_name, pp_type): @@ -1131,13 +1125,12 @@ class ProcessTVShow(object): if sickgear.DELETE_FAILED and self.any_vid_processed: self._delete_folder(dir_name, check_empty=False) - task = u'Failed download processing' + task = 'Failed download processing' if self.any_vid_processed: - self._log_helper(u'Successful %s: (%s, %s)' - % (task.lower(), str(nzb_name), dir_name), logger.MESSAGE) + self._log_helper(f'Successful {task.lower()}: ({str(nzb_name)}, {dir_name})', logger.MESSAGE) else: - self._log_helper(u'%s failed: (%s, %s): %s' - % (task, str(nzb_name), dir_name, process_fail_message), logger.WARNING) + self._log_helper(f'{task} failed: ({str(nzb_name)}, {dir_name}): {process_fail_message}', + logger.WARNING) def process_minimal(self, nzb_name, show_obj, failed, webhandler): if failed: diff --git a/sickgear/properFinder.py b/sickgear/properFinder.py index 9d66fd5d..12a07326 100644 --- a/sickgear/properFinder.py +++ b/sickgear/properFinder.py @@ -185,7 +185,7 @@ def load_webdl_types(): try: for line in url_data.splitlines(): try: - (key, val) = line.strip().split(u'::', 1) + (key, val) = line.strip().split('::', 1) except (BaseException, Exception): continue if None is key or None is val: @@ -218,10 +218,10 @@ def _search_provider(cur_provider, provider_propers, aired_since_shows, recent_s provider_propers.extend(cur_provider.find_propers(search_date=aired_since_shows, shows=recent_shows, anime=recent_anime)) except AuthException as e: - logger.log('Authentication error: %s' % ex(e), logger.ERROR) + logger.error('Authentication error: %s' % ex(e)) except (BaseException, Exception) as e: - logger.log('Error while searching %s, skipping: %s' % (cur_provider.name, ex(e)), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error while searching %s, skipping: %s' % (cur_provider.name, ex(e))) + logger.error(traceback.format_exc()) if not provider_propers: logger.log('No Proper releases found at [%s]' % cur_provider.name) @@ -306,8 +306,8 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime cur_proper.parsed_show_obj = (cur_proper.parsed_show_obj or helpers.find_show_by_id(parse_result.show_obj.tvid_prodid)) if None is cur_proper.parsed_show_obj: - logger.log('Skip download; cannot find show with ID [%s] at %s' % - (cur_proper.prodid, sickgear.TVInfoAPI(cur_proper.tvid).name), logger.ERROR) + logger.error('Skip download; cannot find show with ID [%s] at %s' % + (cur_proper.prodid, sickgear.TVInfoAPI(cur_proper.tvid).name)) continue cur_proper.tvid = cur_proper.parsed_show_obj.tvid @@ -319,26 +319,25 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime # only get anime Proper if it has release group and version if 
parse_result.is_anime and not parse_result.release_group and -1 == parse_result.version: - logger.log('Ignored Proper with no release group and version in name [%s]' % cur_proper.name, - logger.DEBUG) + logger.debug('Ignored Proper with no release group and version in name [%s]' % cur_proper.name) continue if not show_name_helpers.pass_wordlist_checks(cur_proper.name, parse=False, indexer_lookup=False, show_obj=cur_proper.parsed_show_obj): - logger.log('Ignored unwanted Proper [%s]' % cur_proper.name, logger.DEBUG) + logger.debug('Ignored unwanted Proper [%s]' % cur_proper.name) continue re_x = dict(re_prefix='.*', re_suffix='.*') result = show_name_helpers.contains_any(cur_proper.name, cur_proper.parsed_show_obj.rls_ignore_words, rx=cur_proper.parsed_show_obj.rls_ignore_words_regex, **re_x) if None is not result and result: - logger.log('Ignored Proper containing ignore word [%s]' % cur_proper.name, logger.DEBUG) + logger.debug('Ignored Proper containing ignore word [%s]' % cur_proper.name) continue result = show_name_helpers.contains_any(cur_proper.name, cur_proper.parsed_show_obj.rls_require_words, rx=cur_proper.parsed_show_obj.rls_require_words_regex, **re_x) if None is not result and not result: - logger.log('Ignored Proper for not containing any required word [%s]' % cur_proper.name, logger.DEBUG) + logger.debug('Ignored Proper for not containing any required word [%s]' % cur_proper.name) continue cur_size = getattr(cur_proper, 'size', None) @@ -419,15 +418,15 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime old_webdl_type = get_webdl_type(old_extra_no_name, old_name) new_webdl_type = get_webdl_type(parse_result.extra_info_no_name(), cur_proper.name) if old_webdl_type != new_webdl_type: - logger.log('Ignored Proper webdl source [%s], does not match existing webdl source [%s] for [%s]' - % (old_webdl_type, new_webdl_type, cur_proper.name), logger.DEBUG) + logger.debug(f'Ignored Proper webdl source [{old_webdl_type}], does not match existing webdl source' + f' [{new_webdl_type}] for [{cur_proper.name}]') continue # for webdls, prevent Propers from different groups log_same_grp = 'Ignored Proper from release group [%s] does not match existing group [%s] for [%s]' \ % (parse_result.release_group, old_release_group, cur_proper.name) if sickgear.PROPERS_WEBDL_ONEGRP and is_web and not same_release_group: - logger.log(log_same_grp, logger.DEBUG) + logger.debug(log_same_grp) continue # check if we actually want this Proper (if it's the right release group and a higher version) @@ -436,7 +435,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime if not (-1 < old_version < parse_result.version): continue if not same_release_group: - logger.log(log_same_grp, logger.DEBUG) + logger.debug(log_same_grp) continue found_msg = 'Found anime Proper v%s to replace v%s' % (parse_result.version, old_version) else: @@ -454,7 +453,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime # skip if the episode has never downloaded, because a previous quality is required to match the Proper if not len(history_results): - logger.log('Ignored Proper cannot find a recent history item for [%s]' % cur_proper.name, logger.DEBUG) + logger.debug('Ignored Proper cannot find a recent history item for [%s]' % cur_proper.name) continue # make sure that none of the existing history downloads are the same Proper as the download candidate @@ -471,7 +470,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime logger.log('Ignored Proper already in history [%s]' 
% cur_proper.name) continue - logger.log(found_msg, logger.DEBUG) + logger.debug(found_msg) # finish populating the Proper instance # cur_proper.show_obj = cur_proper.parsed_show_obj.prodid @@ -557,16 +556,14 @@ def _download_propers(proper_list): if reject: if isinstance(reject, string_types): if scene_rej_nuked and not scene_nuked_active: - logger.log('Rejecting nuked release. Nuke reason [%s] source [%s]' % (reject, url), - logger.DEBUG) + logger.debug('Rejecting nuked release. Nuke reason [%s] source [%s]' % (reject, url)) else: - logger.log('Considering nuked release. Nuke reason [%s] source [%s]' % (reject, url), - logger.DEBUG) + logger.debug('Considering nuked release. Nuke reason [%s] source [%s]' % (reject, url)) reject = False elif scene_contains or non_scene_fallback: reject = False else: - logger.log('Rejecting as not scene release listed at any [%s]' % url, logger.DEBUG) + logger.debug('Rejecting as not scene release listed at any [%s]' % url) if reject: continue @@ -685,7 +682,7 @@ def _generic_name(name): def _set_last_proper_search(when): - logger.log(u'Setting the last Proper search in the DB to %s' % when, logger.DEBUG) + logger.debug(f'Setting the last Proper search in the DB to {when}') my_db = db.DBConnection() sql_result = my_db.select('SELECT * FROM info') diff --git a/sickgear/providers/__init__.py b/sickgear/providers/__init__.py index 1695162f..ecc0b6d4 100644 --- a/sickgear/providers/__init__.py +++ b/sickgear/providers/__init__.py @@ -177,7 +177,7 @@ def _create_newznab_source(config_string): except IndexError: params.update({k: d}) else: - logger.log(u'Skipping Newznab provider string: \'%s\', incorrect format' % config_string, logger.ERROR) + logger.error(f'Skipping Newznab provider string: \'{config_string}\', incorrect format') return None newznab_module = sys.modules['sickgear.providers.newznab'] @@ -213,8 +213,7 @@ def _create_torrent_rss_source(config_string): url = values[1] enabled = values[3] except ValueError: - logger.log(u"Skipping RSS Torrent provider string: '" + config_string + "', incorrect format", - logger.ERROR) + logger.error(f'Skipping RSS Torrent provider string: \'{config_string}\', incorrect format') return None try: diff --git a/sickgear/providers/alpharatio.py b/sickgear/providers/alpharatio.py index eb4e9a2e..bbb46c0d 100644 --- a/sickgear/providers/alpharatio.py +++ b/sickgear/providers/alpharatio.py @@ -105,7 +105,7 @@ class AlphaRatioProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/bithdtv.py b/sickgear/providers/bithdtv.py index 86e37964..b620519a 100644 --- a/sickgear/providers/bithdtv.py +++ b/sickgear/providers/bithdtv.py @@ -48,7 +48,7 @@ class BitHDTVProvider(generic.TorrentProvider): [(None is y or re.search(r'(?i)rss\slink', y)), self.has_all_cookies(['su', 'sp', 'sl'], 'h_'), 'search' in self.urls] + [(self.session.cookies.get('h_' + x) or 'sg!no!pw') in self.digest for x in ('su', 'sp', 'sl')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. 
Check settings')) @staticmethod def _has_signature(data=None): @@ -110,7 +110,7 @@ class BitHDTVProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/blutopia.py b/sickgear/providers/blutopia.py index c8458a22..b69664b1 100644 --- a/sickgear/providers/blutopia.py +++ b/sickgear/providers/blutopia.py @@ -54,7 +54,7 @@ class BlutopiaProvider(generic.TorrentProvider): def _authorised(self, **kwargs): return super(BlutopiaProvider, self)._authorised( - logged_in=self.logged_in, failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + logged_in=self.logged_in, failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) def logged_in(self, resp=None): @@ -102,7 +102,7 @@ class BlutopiaProvider(generic.TorrentProvider): show_type = self.show_obj.air_by_date and 'Air By Date' \ or self.show_obj.is_sports and 'Sports' or None if show_type: - logger.log(u'Provider does not carry shows of type: [%s], skipping' % show_type, logger.DEBUG) + logger.debug(f'Provider does not carry shows of type: [{show_type}], skipping') return results for search_string in search_params[mode]: @@ -159,7 +159,7 @@ class BlutopiaProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) diff --git a/sickgear/providers/btn.py b/sickgear/providers/btn.py index be0fb5da..6b87bff9 100644 --- a/sickgear/providers/btn.py +++ b/sickgear/providers/btn.py @@ -75,8 +75,7 @@ class BTNProvider(generic.TorrentProvider): self.tmr_limit_update('1', 'h', '150/hr %s' % data) self.log_failure_url(url, post_data, post_json) else: - logger.log(u'Action prematurely ended. %(prov)s server error response = %(desc)s' % - {'prov': self.name, 'desc': data}, logger.WARNING) + logger.warning(f'Action prematurely ended. {self.name} server error response = {data}') def _search_provider(self, search_params, age=0, **kwargs): @@ -118,7 +117,7 @@ class BTNProvider(generic.TorrentProvider): self._check_response(error_text, self.url_api, post_data=json_rpc(params)) return results except AuthException: - logger.log('API looks to be down, add un/pw config detail to be used as a fallback', logger.WARNING) + logger.warning('API looks to be down, add un/pw config detail to be used as a fallback') except (KeyError, Exception): pass @@ -247,7 +246,7 @@ class BTNProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: {traceback.format_exc()}') self._log_search(mode, len(results) - cnt, search_url) @@ -267,7 +266,7 @@ class BTNProvider(generic.TorrentProvider): else: # If we don't have a release name we need to get creative - title = u'' + title = '' keys = ['Series', 'GroupName', 'Resolution', 'Source', 'Codec'] for key in keys: if key in data_json: @@ -353,8 +352,8 @@ class BTNProvider(generic.TorrentProvider): # Set maximum to 24 hours (24 * 60 * 60 = 86400 seconds) of "RSS" data search, # older items will be done through backlog if 86400 < seconds_since_last_update: - logger.log(u'Only trying to fetch the last 24 hours even though the last known successful update on ' + - '%s was over 24 hours' % self.name, logger.WARNING) + logger.warning(f'Only trying to fetch the last 24 hours even though the last known successful update on' + f' {self.name} was over 24 hours') seconds_since_last_update = 86400 return self._search_provider(dict(Cache=['']), age=seconds_since_last_update) diff --git a/sickgear/providers/eztv.py b/sickgear/providers/eztv.py index 5a723b1b..780d6ebf 100644 --- a/sickgear/providers/eztv.py +++ b/sickgear/providers/eztv.py @@ -106,7 +106,7 @@ class EztvProvider(generic.TorrentProvider): except (generic.HaltParseException, IndexError): pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/fano.py b/sickgear/providers/fano.py index ebb34fc8..471518f4 100644 --- a/sickgear/providers/fano.py +++ b/sickgear/providers/fano.py @@ -122,7 +122,7 @@ class FanoProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) diff --git a/sickgear/providers/filelist.py b/sickgear/providers/filelist.py index 7c883c91..2042f4fc 100644 --- a/sickgear/providers/filelist.py +++ b/sickgear/providers/filelist.py @@ -96,7 +96,7 @@ class FLProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, self.session.response.get('url')) diff --git a/sickgear/providers/filesharingtalk.py b/sickgear/providers/filesharingtalk.py index e97a69ec..1030e272 100644 --- a/sickgear/providers/filesharingtalk.py +++ b/sickgear/providers/filesharingtalk.py @@ -80,7 +80,7 @@ class FSTProvider(generic.NZBProvider): success, msg = self._check_cookie() if success: return False - logger.warning(u'%s: %s' % (msg, self.cookies)) + logger.warning(f'{msg}: {self.cookies}') self.cookies = None return None @@ -166,7 +166,7 @@ class FSTProvider(generic.NZBProvider): time.sleep(1.1) pass except (BaseException, Exception): - logger.error(u'Failed to parse. Traceback: %s' % traceback.format_exc()) + logger.error(f'Failed to parse. 
Traceback: {traceback.format_exc()}') self._log_search((mode, search_mode)['Propers' == search_mode], len(results) - cnt, search_url) return results diff --git a/sickgear/providers/funfile.py b/sickgear/providers/funfile.py index e8aecaa0..f7e93b7b 100644 --- a/sickgear/providers/funfile.py +++ b/sickgear/providers/funfile.py @@ -106,7 +106,7 @@ class FunFileProvider(generic.TorrentProvider): except (generic.HaltParseException, AttributeError): pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py index eca6d7a2..17c00f20 100644 --- a/sickgear/providers/generic.py +++ b/sickgear/providers/generic.py @@ -166,9 +166,8 @@ class ProviderFailList(object): with self.lock: self.dirty = True self._fails.append(fail) - logger.log('Adding fail.%s for %s' % (ProviderFailTypes.names.get( - fail.fail_type, ProviderFailTypes.names[ProviderFailTypes.other]), self.provider_name()), - logger.DEBUG) + logger.debug('Adding fail.%s for %s' % (ProviderFailTypes.names.get( + fail.fail_type, ProviderFailTypes.names[ProviderFailTypes.other]), self.provider_name())) self.save_list() def save_list(self): @@ -426,8 +425,8 @@ class GenericProvider(object): if not limit_set: time_index = self.fail_time_index(base_limit=0) self.tmr_limit_wait = self.wait_time(time_index) - logger.log('Request limit reached. Waiting for %s until next retry. Message: %s' % - (self.tmr_limit_wait, desc or 'none found'), logger.WARNING) + logger.warning(f'Request limit reached. Waiting for {self.tmr_limit_wait} until next retry.' + f' Message: {desc or "none found"}') def wait_time(self, time_index=None): # type: (Optional[int]) -> datetime.timedelta @@ -503,8 +502,8 @@ class GenericProvider(object): # Ensure provider name output (e.g. when displaying config/provs) instead of e.g. 
thread "Tornado" prepend = ('[%s] :: ' % self.name, '')[any(x.name in threading.current_thread().name for x in sickgear.providers.sorted_sources())] - logger.log('%sToo many requests reached at %s, waiting for %s' % ( - prepend, self.fmt_delta(self.tmr_limit_time), self.fmt_delta(time_left)), logger.WARNING) + logger.warning(f'{prepend}Too many requests reached at {self.fmt_delta(self.tmr_limit_time)},' + f' waiting for {self.fmt_delta(time_left)}') return use_tmr_limit else: self.tmr_limit_time = None @@ -515,10 +514,9 @@ class GenericProvider(object): if self.is_waiting(): if log_warning: time_left = self.wait_time() - self.fail_newest_delta() - logger.log('Failed %s times, skipping provider for %s, last failure at %s with fail type: %s' % ( + logger.warning('Failed %s times, skipping provider for %s, last failure at %s with fail type: %s' % ( self.failure_count, self.fmt_delta(time_left), self.fmt_delta(self.failure_time), - ProviderFailTypes.names.get( - self.last_fail, ProviderFailTypes.names[ProviderFailTypes.other])), logger.WARNING) + ProviderFailTypes.names.get(self.last_fail, ProviderFailTypes.names[ProviderFailTypes.other]))) return True return False @@ -533,7 +531,7 @@ class GenericProvider(object): self._last_fail_type = fail_type self.fails.add_fail(*args, **kwargs) else: - logger.log('%s: Not logging same failure within 3 seconds' % self.name, logger.DEBUG) + logger.debug('%s: Not logging same failure within 3 seconds' % self.name) def get_url(self, url, skip_auth=False, use_tmr_limit=True, *args, **kwargs): # type: (AnyStr, bool, bool, Any, Any) -> Optional[AnyStr, Dict] @@ -580,7 +578,7 @@ class GenericProvider(object): if data and not isinstance(data, tuple) \ or isinstance(data, tuple) and data[0]: if 0 != self.failure_count: - logger.log('Unblocking provider: %s' % self.get_id(), logger.DEBUG) + logger.debug('Unblocking provider: %s' % self.get_id()) self.failure_count = 0 self.failure_time = None else: @@ -628,7 +626,7 @@ class GenericProvider(object): post += [' .. Post params: [%s]' % '&'.join([post_data])] if post_json: post += [' .. Json params: [%s]' % '&'.join([post_json])] - logger.log('Failure URL: %s%s' % (url, ''.join(post)), logger.WARNING) + logger.warning('Failure URL: %s%s' % (url, ''.join(post))) def get_id(self): # type: (...) 
-> AnyStr @@ -812,7 +810,7 @@ class GenericProvider(object): if not btih or not re.search('(?i)[0-9a-f]{32,40}', btih): assert not result.url.startswith('http') - logger.log('Unable to extract torrent hash from link: ' + ex(result.url), logger.ERROR) + logger.error('Unable to extract torrent hash from link: ' + ex(result.url)) return False urls = ['http%s://%s/torrent/%s.torrent' % (u + (btih.upper(),)) @@ -846,14 +844,14 @@ class GenericProvider(object): failure_monitor=False): if self._verify_download(cache_file): - logger.log(u'Downloaded %s result from %s' % (self.name, url)) + logger.log(f'Downloaded {self.name} result from {url}') try: helpers.move_file(cache_file, final_file) msg = 'moved' except (OSError, Exception): msg = 'copied cached file' - logger.log(u'Saved .%s data and %s to %s' % ( - (link_type, 'torrent cache')['magnet' == link_type], msg, final_file)) + logger.log(f'Saved .{(link_type, "torrent cache")["magnet" == link_type]} data' + f' and {msg} to {final_file}') saved = True break @@ -866,7 +864,7 @@ class GenericProvider(object): del(self.session.headers['Referer']) if not saved and 'magnet' == link_type: - logger.log(u'All torrent cache servers failed to return a downloadable result', logger.DEBUG) + logger.debug('All torrent cache servers failed to return a downloadable result') final_file = os.path.join(final_dir, '%s.%s' % (helpers.sanitize_filename(result.name), link_type)) try: with open(final_file, 'wb') as fp: @@ -874,12 +872,12 @@ class GenericProvider(object): fp.flush() os.fsync(fp.fileno()) saved = True - logger.log(u'Saved magnet link to file as some clients (or plugins) support this, %s' % final_file) + logger.log(f'Saved magnet link to file as some clients (or plugins) support this, {final_file}') if 'blackhole' == sickgear.TORRENT_METHOD: logger.log('Tip: If your client fails to load magnet in files, ' + 'change blackhole to a client connection method in search settings') except (BaseException, Exception): - logger.log(u'Failed to save magnet link to file, %s' % final_file) + logger.log(f'Failed to save magnet link to file, {final_file}') elif not saved: if 'torrent' == link_type and result.provider.get_id() in sickgear.PROVIDER_HOMES: t_result = result # type: TorrentSearchResult @@ -895,7 +893,7 @@ class GenericProvider(object): t_result.provider._valid_home(url_exclude=urls) setattr(sickgear, 'PROVIDER_EXCLUDE', ([], urls)[any([t_result.provider.url])]) - logger.log(u'Server failed to return anything useful', logger.ERROR) + logger.error('Server failed to return anything useful') return saved @@ -969,7 +967,7 @@ class GenericProvider(object): except (BaseException, Exception): pass - title = title and re.sub(r'\s+', '.', u'%s' % title) + title = title and re.sub(r'\s+', '.', f'{title}') if url and not re.match('(?i)magnet:', url): url = str(url).replace('&', '&') @@ -1193,10 +1191,10 @@ class GenericProvider(object): try: parse_result = parser.parse(title, release_group=self.get_id()) except InvalidNameException: - logger.log(u'Unable to parse the filename %s into a valid episode' % title, logger.DEBUG) + logger.debug(f'Unable to parse the filename {title} into a valid episode') continue except InvalidShowException: - logger.log(u'No match for search criteria in the parsed filename ' + title, logger.DEBUG) + logger.debug(f'No match for search criteria in the parsed filename {title}') continue if parse_result.show_obj.is_anime: @@ -1208,8 +1206,8 @@ class GenericProvider(object): continue if not (parse_result.show_obj.tvid == show_obj.tvid and 
parse_result.show_obj.prodid == show_obj.prodid): - logger.debug(u'Parsed show [%s] is not show [%s] we are searching for' % ( - parse_result.show_obj.unique_name, show_obj.unique_name)) + logger.debug(f'Parsed show [{parse_result.show_obj.unique_name}] is not show [{show_obj.unique_name}]' + f' we are searching for') continue parsed_show_obj = parse_result.show_obj @@ -1223,15 +1221,15 @@ class GenericProvider(object): if not (parsed_show_obj.air_by_date or parsed_show_obj.is_sports): if 'sponly' == search_mode: if len(parse_result.episode_numbers): - logger.log(u'This is supposed to be a season pack search but the result ' + title + - u' is not a valid season pack, skipping it', logger.DEBUG) + logger.debug(f'This is supposed to be a season pack search but the result {title}' + f' is not a valid season pack, skipping it') add_cache_entry = True if len(parse_result.episode_numbers) \ and (parse_result.season_number not in set([ep_obj.season for ep_obj in ep_obj_list]) or not [ep_obj for ep_obj in ep_obj_list if ep_obj.scene_episode in parse_result.episode_numbers]): - logger.log(u'The result ' + title + u' doesn\'t seem to be a valid episode that we are trying' + - u' to snatch, ignoring', logger.DEBUG) + logger.debug(f'The result {title} doesn\'t seem to be a valid episode that we are trying' + f' to snatch, ignoring') add_cache_entry = True else: if not len(parse_result.episode_numbers)\ @@ -1239,14 +1237,14 @@ class GenericProvider(object): and not [ep_obj for ep_obj in ep_obj_list if ep_obj.season == parse_result.season_number and ep_obj.episode in parse_result.episode_numbers]: - logger.log(u'The result ' + title + u' doesn\'t seem to be a valid season that we are trying' + - u' to snatch, ignoring', logger.DEBUG) + logger.debug(f'The result {title} doesn\'t seem to be a valid season that we are trying' + f' to snatch, ignoring') add_cache_entry = True elif len(parse_result.episode_numbers) and not [ ep_obj for ep_obj in ep_obj_list if ep_obj.season == parse_result.season_number and ep_obj.episode in parse_result.episode_numbers]: - logger.log(u'The result ' + title + ' doesn\'t seem to be a valid episode that we are trying' + - u' to snatch, ignoring', logger.DEBUG) + logger.debug(f'The result {title} doesn\'t seem to be a valid episode that we are trying' + f' to snatch, ignoring') add_cache_entry = True if not add_cache_entry: @@ -1255,8 +1253,8 @@ class GenericProvider(object): episode_numbers = parse_result.episode_numbers else: if not parse_result.is_air_by_date: - logger.log(u'This is supposed to be a date search but the result ' + title + - u' didn\'t parse as one, skipping it', logger.DEBUG) + logger.debug(f'This is supposed to be a date search but the result {title}' + f' didn\'t parse as one, skipping it') add_cache_entry = True else: season_number = parse_result.season_number @@ -1265,13 +1263,13 @@ class GenericProvider(object): if not episode_numbers or \ not [ep_obj for ep_obj in ep_obj_list if ep_obj.season == season_number and ep_obj.episode in episode_numbers]: - logger.log(u'The result ' + title + ' doesn\'t seem to be a valid episode that we are trying' + - u' to snatch, ignoring', logger.DEBUG) + logger.debug(f'The result {title} doesn\'t seem to be a valid episode that we are trying' + f' to snatch, ignoring') add_cache_entry = True # add parsed result to cache for usage later on if add_cache_entry: - logger.log(u'Adding item from search to cache: ' + title, logger.DEBUG) + logger.debug(f'Adding item from search to cache: {title}') ci = 
self.cache.add_cache_entry(title, url, parse_result=parse_result) if None is not ci: cl.append(ci) @@ -1288,11 +1286,11 @@ class GenericProvider(object): multi_ep = 1 < len(episode_numbers) if not want_ep: - logger.log(u'Ignoring result %s because we don\'t want an episode that is %s' - % (title, Quality.qualityStrings[quality]), logger.DEBUG) + logger.debug(f'Ignoring result {title} because we don\'t want an episode that is' + f' {Quality.qualityStrings[quality]}') continue - logger.log(u'Found result %s at %s' % (title, url), logger.DEBUG) + logger.debug(f'Found result {title} at {url}') # make a result object ep_obj_results = [] # type: List[TVEpisode] @@ -1317,14 +1315,14 @@ class GenericProvider(object): ep_num = None if 1 == len(ep_obj_results): ep_num = ep_obj_results[0].episode - logger.log(u'Single episode result.', logger.DEBUG) + logger.debug('Single episode result.') elif 1 < len(ep_obj_results): ep_num = MULTI_EP_RESULT - logger.log(u'Separating multi-episode result to check for later - result contains episodes: ' + - str(parse_result.episode_numbers), logger.DEBUG) + logger.debug(f'Separating multi-episode result to check for later - result contains episodes:' + f' {parse_result.episode_numbers}') elif 0 == len(ep_obj_results): ep_num = SEASON_RESULT - logger.log(u'Separating full season result to check for later', logger.DEBUG) + logger.debug('Separating full season result to check for later') if ep_num not in results: # noinspection PyTypeChecker @@ -1390,7 +1388,7 @@ class GenericProvider(object): if not self.should_skip(): str1, thing, str3 = (('', '%s item' % mode.lower(), ''), (' usable', 'proper', ' found'))['Propers' == mode] - logger.log((u'%s %s in response%s from %s' % (('No' + str1, count)[0 < count], ( + logger.log(('%s %s in response%s from %s' % (('No' + str1, count)[0 < count], ( '%s%s%s%s' % (('', 'freeleech ')[getattr(self, 'freeleech', False)], thing, maybe_plural(count), str3)), ('', ' (rejects: %s)' % rejects)[bool(rejects)], re.sub(r'(\s)\s+', r'\1', url))).replace('%%', '%')) @@ -1412,9 +1410,9 @@ class GenericProvider(object): reqd = 'cf_clearance' if reqd in ui_string_method(key) and reqd not in cookies: return False, \ - u'%(p)s Cookies setting require %(r)s. If %(r)s not found in browser, log out,' \ - u' delete site cookies, refresh browser, %(r)s should be created' % \ - dict(p=self.name, r='\'%s\'' % reqd) + '%(p)s Cookies setting require %(r)s. 
If %(r)s not found in browser, log out,' \ + ' delete site cookies, refresh browser, %(r)s should be created' % \ + dict(p=self.name, r='\'%s\'' % reqd) cj = requests.utils.add_dict_to_cookiejar(self.session.cookies, dict([x.strip().split('=', 1) for x in cookies.split(';') @@ -1586,7 +1584,7 @@ class NZBProvider(GenericProvider): if result_date: result_date = datetime.datetime(*result_date[0:6]) else: - logger.log(u'Unable to figure out the date for entry %s, skipping it' % title) + logger.log(f'Unable to figure out the date for entry {title}, skipping it') continue if not search_date or search_date < result_date: @@ -1918,7 +1916,7 @@ class TorrentProvider(GenericProvider): success, msg = self._check_cookie() if not success: self.cookies = None - logger.log(u'%s' % msg, logger.WARNING) + logger.warning(f'{msg}') return url_base = getattr(self, 'url_base', None) @@ -1998,12 +1996,12 @@ class TorrentProvider(GenericProvider): r'(?i)([1-3]((<[^>]+>)|\W)*(attempts|tries|remain)[\W\w]{,40}?(remain|left|attempt)|last[^<]+?attempt)', y)) logged_in, failed_msg = [None is not a and a or b for (a, b) in ( (logged_in, (lambda y=None: self.has_all_cookies())), - (failed_msg, (lambda y='': maxed_out(y) and u'Urgent abort, running low on login attempts. ' + - u'Password flushed to prevent service disruption to %s.' or + (failed_msg, (lambda y='': maxed_out(y) and 'Urgent abort, running low on login attempts. ' + + 'Password flushed to prevent service disruption to %s.' or (re.search(r'(?i)(username|password)((<[^>]+>)|\W)*' + r'(or|and|/|\s)((<[^>]+>)|\W)*(password|incorrect)', y) and - u'Invalid username or password for %s. Check settings' or - u'Failed to authenticate or parse a response from %s, abort provider'))) + 'Invalid username or password for %s. Check settings' or + 'Failed to authenticate or parse a response from %s, abort provider'))) )] if logged_in() and (not hasattr(self, 'urls') or bool(len(getattr(self, 'urls')))): @@ -2017,7 +2015,7 @@ class TorrentProvider(GenericProvider): if not self._check_auth(): return False except AuthException as e: - logger.log('%s' % ex(e), logger.ERROR) + logger.error('%s' % ex(e)) return False if isinstance(url, type([])): @@ -2094,7 +2092,7 @@ class TorrentProvider(GenericProvider): sickgear.save_config() msg = failed_msg(response) if msg: - logger.log(msg % self.name, logger.ERROR) + logger.error(msg % self.name) return False diff --git a/sickgear/providers/hdbits.py b/sickgear/providers/hdbits.py index 11542acd..6c8ed495 100644 --- a/sickgear/providers/hdbits.py +++ b/sickgear/providers/hdbits.py @@ -49,7 +49,7 @@ class HDBitsProvider(generic.TorrentProvider): def _check_auth_from_data(self, parsed_json): if 'status' in parsed_json and 5 == parsed_json.get('status') and 'message' in parsed_json: - logger.log(u'Incorrect username or password for %s: %s' % (self.name, parsed_json['message']), logger.DEBUG) + logger.debug(f'Incorrect username or password for {self.name}: {parsed_json["message"]}') raise AuthException('Your username or password for %s is incorrect, check your config.' 
% self.name) return True @@ -115,10 +115,10 @@ class HDBitsProvider(generic.TorrentProvider): try: if not (json_resp and self._check_auth_from_data(json_resp) and 'data' in json_resp): - logger.log(u'Response from %s does not contain any json data, abort' % self.name, logger.ERROR) + logger.error(f'Response from {self.name} does not contain any json data, abort') return results except AuthException as e: - logger.log(u'Authentication error: %s' % (ex(e)), logger.ERROR) + logger.error(f'Authentication error: {ex(e)}') return results cnt = len(items[mode]) diff --git a/sickgear/providers/hdspace.py b/sickgear/providers/hdspace.py index 103f1e46..d693d7af 100644 --- a/sickgear/providers/hdspace.py +++ b/sickgear/providers/hdspace.py @@ -128,7 +128,7 @@ class HDSpaceProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/hdtorrents.py b/sickgear/providers/hdtorrents.py index c9b88823..8fcb5067 100644 --- a/sickgear/providers/hdtorrents.py +++ b/sickgear/providers/hdtorrents.py @@ -131,7 +131,7 @@ class HDTorrentsProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) diff --git a/sickgear/providers/iptorrents.py b/sickgear/providers/iptorrents.py index c5801d64..f399be6c 100644 --- a/sickgear/providers/iptorrents.py +++ b/sickgear/providers/iptorrents.py @@ -58,7 +58,7 @@ class IPTorrentsProvider(generic.TorrentProvider): ['IPTorrents' in y, 'type="password"' not in y[0:2048], self.has_all_cookies()] + [(self.session.cookies.get(c, domain='') or 'sg!no!pw') in self.digest for c in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): @@ -154,7 +154,7 @@ class IPTorrentsProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url, log_settings_hint) if self.is_search_finished(mode, items, cnt_search, rc['id'], last_recent_search, lrs_new, lrs_found): diff --git a/sickgear/providers/limetorrents.py b/sickgear/providers/limetorrents.py index 18ee1e7b..7a429b74 100644 --- a/sickgear/providers/limetorrents.py +++ b/sickgear/providers/limetorrents.py @@ -114,7 +114,7 @@ class LimeTorrentsProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) @@ -131,7 +131,7 @@ class LimeTorrentsProvider(generic.TorrentProvider): try: result = re.findall('(?i)"(magnet:[^"]+?)"', html)[0] except IndexError: - logger.log('Failed no magnet in response', logger.DEBUG) + logger.debug('Failed no magnet in response') return result diff --git a/sickgear/providers/magnetdl.py b/sickgear/providers/magnetdl.py index 5bad6c03..b6ed7559 100644 --- a/sickgear/providers/magnetdl.py +++ b/sickgear/providers/magnetdl.py @@ -99,7 +99,7 @@ class MagnetDLProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/morethan.py b/sickgear/providers/morethan.py index a25b9c50..bb94c431 100644 --- a/sickgear/providers/morethan.py +++ b/sickgear/providers/morethan.py @@ -112,7 +112,7 @@ class MoreThanProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/ncore.py b/sickgear/providers/ncore.py index ffbc9513..d99e9512 100644 --- a/sickgear/providers/ncore.py +++ b/sickgear/providers/ncore.py @@ -105,7 +105,7 @@ class NcoreProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/nebulance.py b/sickgear/providers/nebulance.py index f8005eca..843beb4a 100644 --- a/sickgear/providers/nebulance.py +++ b/sickgear/providers/nebulance.py @@ -119,7 +119,7 @@ class NebulanceProvider(generic.TorrentProvider): items[mode].append((title, download_url, seeders, self._bytesizer(size))) except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/newznab.py b/sickgear/providers/newznab.py index 2fe12d6a..fb7cc782 100644 --- a/sickgear/providers/newznab.py +++ b/sickgear/providers/newznab.py @@ -331,7 +331,7 @@ class NewznabProvider(generic.NZBProvider): except (BaseException, Exception): continue except (BaseException, Exception): - logger.log('Error parsing result for [%s]' % self.name, logger.DEBUG) + logger.debug('Error parsing result for [%s]' % self.name) if not caps and self._caps and not all_cats and self._caps_all_cats and not cats and self._caps_cats: self._check_excludes(cats) @@ -644,14 +644,14 @@ class NewznabProvider(generic.NZBProvider): if not s.show_obj.is_anime and not s.show_obj.is_sports: if not getattr(s, 'wanted_quality', None): # this should not happen, the creation is missing for the search in this case - logger.log('wanted_quality property was missing for search, creating it', logger.WARNING) + logger.warning('wanted_quality property was missing for search, creating it') ep_status, ep_quality = Quality.split_composite_status(ep_obj.status) s.wanted_quality = get_wanted_qualities(ep_obj, ep_status, ep_quality, unaired=True) needed.check_needed_qualities(s.wanted_quality) if not hasattr(ep_obj, 'eps_aired_in_season'): # this should not happen, the creation is missing for the search in this case - logger.log('eps_aired_in_season property was missing for search, creating it', logger.WARNING) + logger.warning('eps_aired_in_season property was missing for search, creating it') ep_count, ep_count_scene = get_aired_in_season(ep_obj.show_obj) ep_obj.eps_aired_in_season = ep_count.get(ep_obj.season, 0) ep_obj.eps_aired_in_scene_season = ep_count_scene.get(ep_obj.scene_season, 0) if ep_obj.show_obj.is_scene \ @@ -978,14 +978,14 @@ class NewznabProvider(generic.NZBProvider): parsed_xml, n_spaces = self.cache.parse_and_get_ns(data) items = parsed_xml.findall('channel/item') except (BaseException, Exception): - logger.log('Error trying to load %s RSS feed' % self.name, logger.WARNING) + logger.warning('Error trying to load %s RSS feed' % self.name) break if not self._check_auth_from_data(parsed_xml, search_url): break if 'rss' != parsed_xml.tag: - logger.log('Resulting XML from %s isn\'t RSS, not parsing it' % self.name, logger.WARNING) + logger.warning('Resulting XML from %s isn\'t RSS, not parsing it' % self.name) break i and time.sleep(2.1) @@ -996,8 +996,7 @@ class NewznabProvider(generic.NZBProvider): if title and url: results.append(item) else: - logger.log('The data returned from %s is incomplete, this result is unusable' % self.name, - logger.DEBUG) + logger.debug('The data returned from %s is incomplete, this result is unusable' % self.name) # get total and offset attributes try: @@ -1036,8 +1035,8 @@ class NewznabProvider(generic.NZBProvider): # there are more items available than the amount given in one call, grab some more items = total - request_params['offset'] - logger.log('%s more item%s to fetch from a batch of up to %s items.' 
- % (items, helpers.maybe_plural(items), request_params['limit']), logger.DEBUG) + logger.debug(f'{items} more item{helpers.maybe_plural(items)} to fetch from a batch of up to' + f' {request_params["limit"]} items.') batch_count = self._log_result(results, mode, cnt, search_url) exit_log = False @@ -1125,7 +1124,7 @@ class NewznabProvider(generic.NZBProvider): result_date = self._parse_pub_date(item) if not result_date: - logger.log(u'Unable to figure out the date for entry %s, skipping it' % title) + logger.log(f'Unable to figure out the date for entry {title}, skipping it') continue result_size, result_uid = self._parse_size_uid(item, ns=n_space) @@ -1201,7 +1200,7 @@ class NewznabCache(tvcache.TVCache): else: (items, n_spaces) = self.provider.cache_data(needed=needed) except (BaseException, Exception) as e: - logger.log('Error updating Cache: %s' % ex(e), logger.ERROR) + logger.error('Error updating Cache: %s' % ex(e)) items = None if items: @@ -1257,5 +1256,4 @@ class NewznabCache(tvcache.TVCache): if title and url: return self.add_cache_entry(title, url, tvid_prodid=ids) - logger.log('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name, - logger.DEBUG) + logger.debug('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name) diff --git a/sickgear/providers/nyaa.py b/sickgear/providers/nyaa.py index 65156509..4bb3f460 100644 --- a/sickgear/providers/nyaa.py +++ b/sickgear/providers/nyaa.py @@ -91,7 +91,7 @@ class NyaaProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/omgwtfnzbs.py b/sickgear/providers/omgwtfnzbs.py index 054dfad9..1d7e4bc6 100644 --- a/sickgear/providers/omgwtfnzbs.py +++ b/sickgear/providers/omgwtfnzbs.py @@ -87,8 +87,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider): if re.search('(?i)(information is incorrect|in(?:valid|correct).*?(?:username|api))', data_json.get('notice')): - logger.log(u'Incorrect authentication credentials for ' + self.name + ' : ' + str(description_text), - logger.DEBUG) + logger.debug(f'Incorrect authentication credentials for {self.name} : {description_text}') raise AuthException( 'Your authentication credentials for ' + self.name + ' are incorrect, check your config.') @@ -96,7 +95,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider): return True else: - logger.log(u'Unknown error given from ' + self.name + ' : ' + str(description_text), logger.DEBUG) + logger.debug(f'Unknown error given from {self.name} : {str(description_text)}') return False return True @@ -149,7 +148,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider): self.tmr_limit_update('1', 'h', 'Your 24 hour limit of 10 NZBs has been reached') self.log_failure_url(url) elif '' not in data or 'seem to be logged in' in data: - logger.log('Failed nzb data response: %s' % data, logger.DEBUG) + logger.debug('Failed nzb data response: %s' % data) else: result = data return result @@ -345,7 +344,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider): time.sleep(1.1) pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: {traceback.format_exc()}') mode = (mode, search_mode)['Propers' == search_mode] self._log_search(mode, len(results) - cnt, search_url) @@ -400,7 +399,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider): if success and self.nn: success, msg = None, 'pm dev in irc about this feature' if not success: - logger.log(u'%s: %s' % (msg, self.cookies), logger.WARNING) + logger.warning(f'{msg}: {self.cookies}') self.cookies = None return None return False diff --git a/sickgear/providers/pretome.py b/sickgear/providers/pretome.py index 23d067dd..460fd807 100644 --- a/sickgear/providers/pretome.py +++ b/sickgear/providers/pretome.py @@ -100,7 +100,7 @@ class PreToMeProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.error(u'Failed to parse. Traceback: %s' % traceback.format_exc()) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/privatehd.py b/sickgear/providers/privatehd.py index 7ba28252..08ae3a7d 100644 --- a/sickgear/providers/privatehd.py +++ b/sickgear/providers/privatehd.py @@ -56,7 +56,7 @@ class PrivateHDProvider(generic.TorrentProvider): return super(PrivateHDProvider, self)._authorised( logged_in=(lambda y='': 'English' in y and 'auth/login' not in y and all( [(self.session.cookies.get('privatehdx_session', domain='') or 'sg!no!pw') in self.digest])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) def _search_provider(self, search_params, **kwargs): @@ -88,7 +88,7 @@ class PrivateHDProvider(generic.TorrentProvider): show_type = self.show_obj.air_by_date and 'Air By Date' \ or self.show_obj.is_sports and 'Sports' or self.show_obj.is_anime and 'Anime' or None if show_type: - logger.log(u'Provider does not carry shows of type: [%s], skipping' % show_type, logger.DEBUG) + logger.debug(f'Provider does not carry shows of type: [{show_type}], skipping') return results for search_string in search_params[mode]: @@ -141,7 +141,7 @@ class PrivateHDProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) diff --git a/sickgear/providers/ptf.py b/sickgear/providers/ptf.py index da1c94f2..d041d43a 100644 --- a/sickgear/providers/ptf.py +++ b/sickgear/providers/ptf.py @@ -56,7 +56,7 @@ class PTFProvider(generic.TorrentProvider): logged_in=(lambda y='': all( ['RSS Feed' in y, self.has_all_cookies('session_key')] + [(self.session.cookies.get(x) or 'sg!no!pw') in self.digest for x in ['session_key']])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) def _search_provider(self, search_params, **kwargs): @@ -144,7 +144,7 @@ class PTFProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + self.session.response.get('url')) diff --git a/sickgear/providers/rarbg.py b/sickgear/providers/rarbg.py index ab9f2ffe..8889bfc3 100644 --- a/sickgear/providers/rarbg.py +++ b/sickgear/providers/rarbg.py @@ -68,7 +68,7 @@ class RarbgProvider(generic.TorrentProvider): return True time.sleep(2) - logger.log(u'No usable API token returned from: %s' % self.urls['api_token'], logger.ERROR) + logger.error(f'No usable API token returned from: {self.urls["api_token"]}') return False @staticmethod diff --git a/sickgear/providers/revtt.py b/sickgear/providers/revtt.py index 50527f39..2e367969 100644 --- a/sickgear/providers/revtt.py +++ b/sickgear/providers/revtt.py @@ -102,7 +102,7 @@ class RevTTProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, self.session.response.get('url')) diff --git a/sickgear/providers/rsstorrent.py b/sickgear/providers/rsstorrent.py index 802eae4e..31971841 100644 --- a/sickgear/providers/rsstorrent.py +++ b/sickgear/providers/rsstorrent.py @@ -59,7 +59,7 @@ class TorrentRssProvider(generic.TorrentProvider): title, url = None, None if item.title: - title = re.sub(r'\s+', '.', u'' + item.title) + title = re.sub(r'\s+', '.', '' + item.title) attempt_list = [lambda: item.torrent_magneturi, lambda: item.enclosures[0].href, diff --git a/sickgear/providers/scenehd.py b/sickgear/providers/scenehd.py index 74da4457..a04aa810 100644 --- a/sickgear/providers/scenehd.py +++ b/sickgear/providers/scenehd.py @@ -47,7 +47,7 @@ class SceneHDProvider(generic.TorrentProvider): return super(SceneHDProvider, self)._authorised( logged_in=(lambda y='': ['RSS links' in y] and all( [(self.session.cookies.get(c, domain='') or 'sg!no!pw') in self.digest for c in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) def _search_provider(self, search_params, **kwargs): @@ -109,7 +109,7 @@ class SceneHDProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/scenetime.py b/sickgear/providers/scenetime.py index f4f783fb..edc318f0 100644 --- a/sickgear/providers/scenetime.py +++ b/sickgear/providers/scenetime.py @@ -50,7 +50,7 @@ class SceneTimeProvider(generic.TorrentProvider): ['staff-support' in y, self.has_all_cookies()] + [(self.session.cookies.get(x, domain='') or 'sg!no!pw') in self.digest for x in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): @@ -146,7 +146,7 @@ class SceneTimeProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url, log_settings_hint) diff --git a/sickgear/providers/shazbat.py b/sickgear/providers/shazbat.py index 3121924d..81bf520c 100644 --- a/sickgear/providers/shazbat.py +++ b/sickgear/providers/shazbat.py @@ -134,7 +134,7 @@ class ShazbatProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/showrss.py b/sickgear/providers/showrss.py index e9356e14..392e8e45 100644 --- a/sickgear/providers/showrss.py +++ b/sickgear/providers/showrss.py @@ -114,7 +114,7 @@ class ShowRSSProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/snowfl.py b/sickgear/providers/snowfl.py index 25f46c3a..b0a252ac 100644 --- a/sickgear/providers/snowfl.py +++ b/sickgear/providers/snowfl.py @@ -117,7 +117,7 @@ class SnowflProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/speedapp.py b/sickgear/providers/speedapp.py index 478e20b3..a354988a 100644 --- a/sickgear/providers/speedapp.py +++ b/sickgear/providers/speedapp.py @@ -46,7 +46,7 @@ class SpeedAppProvider(generic.TorrentProvider): return super(SpeedAppProvider, self)._authorised( logged_in=self.logged_in, parse_json=True, headers=self.auth_header(), - failed_msg=(lambda y=None: u'Invalid token or permissions for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid token or permissions for %s. Check settings')) def logged_in(self, resp=None): diff --git a/sickgear/providers/speedcd.py b/sickgear/providers/speedcd.py index 9964362a..f1a12083 100644 --- a/sickgear/providers/speedcd.py +++ b/sickgear/providers/speedcd.py @@ -94,9 +94,9 @@ class SpeedCDProvider(generic.TorrentProvider): self.digest = 'inSpeed_speedian=%s' % self.session.cookies.get('inSpeed_speedian') sickgear.save_config() result = True - logger.log('Cookie details for %s updated.' % self.name, logger.DEBUG) + logger.debug('Cookie details for %s updated.' % self.name) elif not self.failure_count: - logger.log('Invalid cookie details for %s and login failed. Check settings' % self.name, logger.ERROR) + logger.error('Invalid cookie details for %s and login failed. 
Check settings' % self.name) return result @staticmethod diff --git a/sickgear/providers/thepiratebay.py b/sickgear/providers/thepiratebay.py index bf57db9f..1e390aef 100644 --- a/sickgear/providers/thepiratebay.py +++ b/sickgear/providers/thepiratebay.py @@ -113,7 +113,7 @@ class ThePirateBayProvider(generic.TorrentProvider): if not self._reject_item(seeders, leechers): status, info_hash = [cur_item.get(k) for k in ('status', 'info_hash')] if self.confirmed and not rc['verify'].search(status): - logger.log(u'Skipping untrusted non-verified result: ' + title, logger.DEBUG) + logger.debug('Skipping untrusted non-verified result: ' + title) continue download_magnet = info_hash if '&tr=' in info_hash \ else self._dhtless_magnet(info_hash, title) @@ -236,7 +236,7 @@ class ThePirateBayProvider(generic.TorrentProvider): if self.confirmed and not ( tr.find('img', title=rc['verify']) or tr.find('img', alt=rc['verify']) or tr.find('img', src=rc['verify'])): - logger.log(u'Skipping untrusted non-verified result: ' + title, logger.DEBUG) + logger.debug('Skipping untrusted non-verified result: ' + title) continue if title and download_magnet: @@ -245,7 +245,7 @@ class ThePirateBayProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/torlock.py b/sickgear/providers/torlock.py index 79374449..3ddb1346 100644 --- a/sickgear/providers/torlock.py +++ b/sickgear/providers/torlock.py @@ -121,7 +121,7 @@ class TorLockProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/torrentday.py b/sickgear/providers/torrentday.py index 3badba6f..2e0d751d 100644 --- a/sickgear/providers/torrentday.py +++ b/sickgear/providers/torrentday.py @@ -57,7 +57,7 @@ class TorrentDayProvider(generic.TorrentProvider): ['RSS URL' in y, self.has_all_cookies()] + [(self.session.cookies.get(c, domain='') or 'sg!no!pw') in self.digest for c in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): diff --git a/sickgear/providers/torrenting.py b/sickgear/providers/torrenting.py index 0870d459..c8c5c1b0 100644 --- a/sickgear/providers/torrenting.py +++ b/sickgear/providers/torrenting.py @@ -47,7 +47,7 @@ class TorrentingProvider(generic.TorrentProvider): logged_in=(lambda y='': all( ['RSS link' in y, self.has_all_cookies()] + [(self.session.cookies.get(x) or 'sg!no!pw') in self.digest for x in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): @@ -107,7 +107,7 @@ class TorrentingProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. 
Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/torrentleech.py b/sickgear/providers/torrentleech.py index 148353f9..faeecb4d 100644 --- a/sickgear/providers/torrentleech.py +++ b/sickgear/providers/torrentleech.py @@ -44,7 +44,7 @@ class TorrentLeechProvider(generic.TorrentProvider): return super(TorrentLeechProvider, self)._authorised( logged_in=(lambda y='': all( ['TorrentLeech' in y, 'type="password"' not in y[0:4096], self.has_all_cookies(pre='tl')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): diff --git a/sickgear/providers/tvchaosuk.py b/sickgear/providers/tvchaosuk.py index 8897cf92..c7576fd1 100644 --- a/sickgear/providers/tvchaosuk.py +++ b/sickgear/providers/tvchaosuk.py @@ -142,7 +142,7 @@ class TVChaosUKProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') if soup: soup.clear(True) diff --git a/sickgear/providers/xspeeds.py b/sickgear/providers/xspeeds.py index e500b438..8b3a7487 100644 --- a/sickgear/providers/xspeeds.py +++ b/sickgear/providers/xspeeds.py @@ -67,7 +67,7 @@ class XspeedsProvider(generic.TorrentProvider): if self.should_skip(): return results for search_string in search_params[mode]: - search_string = search_string.replace(u'£', '%') + search_string = search_string.replace('£', '%') search_string = re.sub(r'[\s.]+', '%', search_string) kwargs = dict(post_data={'keywords': search_string, 'do': 'quick_sort', 'page': '0', @@ -131,7 +131,7 @@ class XspeedsProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, ('search string: ' + search_string.replace('%', '%%'), self.name)['Cache' == mode]) diff --git a/sickgear/rssfeeds.py b/sickgear/rssfeeds.py index dd24463c..f91b67fe 100644 --- a/sickgear/rssfeeds.py +++ b/sickgear/rssfeeds.py @@ -30,9 +30,9 @@ class RSSFeeds(object): if data and 'error' in data.feed: err_code = data.feed['error']['code'] err_desc = data.feed['error']['description'] - logger.log(u'RSS error:[%s] code:[%s]' % (err_desc, err_code), logger.DEBUG) + logger.debug(f'RSS error:[{err_desc}] code:[{err_code}]') else: - logger.log(u'RSS error loading url: ' + url, logger.DEBUG) + logger.debug(f'RSS error loading url: {url}') except (BaseException, Exception) as e: - logger.log(u'RSS error: ' + ex(e), logger.DEBUG) + logger.debug(f'RSS error: {ex(e)}') diff --git a/sickgear/sab.py b/sickgear/sab.py index 8efa531b..e98b69f4 100644 --- a/sickgear/sab.py +++ b/sickgear/sab.py @@ -67,10 +67,10 @@ def send_nzb(search_result): return False kwargs['files'] = {'nzbfile': ('%s.nzb' % search_result.name, nzb_data)} - logger.log(u'Sending %s to SABnzbd: %s' % (nzb_type, search_result.name)) + logger.log(f'Sending {nzb_type} to SABnzbd: {search_result.name}') url = '%sapi' % sickgear.SAB_HOST - logger.log(u'SABnzbd at %s sent params: %s' % (url, params), logger.DEBUG) + logger.debug(f'SABnzbd at {url} sent params: {params}') success, result = _get_url(url, **kwargs) if not success: @@ -78,23 +78,23 @@ def send_nzb(search_result): # do some crude parsing of the result text to determine what SAB said if result.get('status'): - logger.log(u'Success from SABnzbd using %s' % nzb_type, logger.DEBUG) + logger.debug(f'Success from SABnzbd using {nzb_type}') return True elif 'error' in result: - logger.log(u'Failed using %s with SABnzbd, response: %s' % (nzb_type, result.get('error', 'und')), logger.ERROR) + logger.error(f'Failed using {nzb_type} with SABnzbd, response: {result.get("error", "und")}') else: - logger.log(u'Failure unknown using %s with SABnzbd, response: %s' % (nzb_type, result), logger.ERROR) + logger.error(f'Failure unknown using {nzb_type} with SABnzbd, response: {result}') return False def _check_sab_response(result): if 0 == len(result): - logger.log('No data returned from SABnzbd, nzb not used', logger.ERROR) + logger.error('No data returned from SABnzbd, nzb not used') return False, 'No data from SABnzbd' if 'error' in result: - logger.log(result['error'], logger.ERROR) + logger.error(result['error']) return False, result['error'] return True, result @@ -103,7 +103,7 @@ def _get_url(url, params=None, **kwargs): result = sickgear.helpers.get_url(url, params=params, parse_json=True, **kwargs) if None is result: - logger.log('Error, no response from SABnzbd', logger.ERROR) + logger.error('Error, no response from SABnzbd') return False, 'Error, no response from SABnzbd' return True, result @@ -132,7 +132,7 @@ def test_authentication(host=None, username=None, password=None, apikey=None): url = '%sapi' % host # send the test request - logger.log(u'SABnzbd test URL: %s with parameters: %s' % (url, params), logger.DEBUG) + logger.debug(f'SABnzbd test URL: {url} with parameters: {params}') success, result = _get_url(url, params=params) if not success: return False, result diff --git a/sickgear/scene_exceptions.py b/sickgear/scene_exceptions.py index ceaa42d4..3e4064fe 100644 --- a/sickgear/scene_exceptions.py +++ b/sickgear/scene_exceptions.py @@ -36,7 +36,7 @@ from .sgdatetime import timestamp_near import 
lib.rarfile.rarfile as rarfile from _23 import list_range -from six import iteritems, text_type +from six import iteritems # noinspection PyUnreachableCode if False: @@ -229,10 +229,10 @@ def retrieve_exceptions(): """ global exception_dict, anidb_exception_dict, xem_exception_dict - # exceptions are stored on github pages + # exceptions are stored on GitHub pages for tvid in sickgear.TVInfoAPI().sources: if should_refresh(sickgear.TVInfoAPI(tvid).name): - logger.log(u'Checking for scene exception updates for %s' % sickgear.TVInfoAPI(tvid).name) + logger.log(f'Checking for scene exception updates for {sickgear.TVInfoAPI(tvid).name}') url = sickgear.TVInfoAPI(tvid).config.get('scene_url') if not url: @@ -241,7 +241,7 @@ def retrieve_exceptions(): url_data = helpers.get_url(url) if None is url_data: # When None is urlData, trouble connecting to github - logger.log(u'Check scene exceptions update failed. Unable to get URL: %s' % url, logger.ERROR) + logger.error(f'Check scene exceptions update failed. Unable to get URL: {url}') continue else: @@ -307,8 +307,8 @@ def retrieve_exceptions(): try: cur_exception, cur_season = next(iteritems(cur_exception_dict)) except (BaseException, Exception): - logger.log('scene exception error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('scene exception error') + logger.error(traceback.format_exc()) continue cl.append(['INSERT INTO scene_exceptions' @@ -322,9 +322,9 @@ def retrieve_exceptions(): # since this could invalidate the results of the cache we clear it out after updating if changed_exceptions: - logger.log(u'Updated scene exceptions') + logger.log('Updated scene exceptions') else: - logger.log(u'No scene exceptions update needed') + logger.log('No scene exceptions update needed') # cleanup exception_dict.clear() @@ -353,14 +353,13 @@ def update_scene_exceptions(tvid, prodid, scene_exceptions): # A change has been made to the scene exception list. 
Let's clear the cache, to make this visible exceptionsCache[(tvid, prodid)] = defaultdict(list) - logger.log(u'Updating scene exceptions', logger.MESSAGE) + logger.log('Updating scene exceptions', logger.MESSAGE) for exception in scene_exceptions: cur_season, cur_exception = exception.split('|', 1) try: cur_season = int(cur_season) except (BaseException, Exception): - logger.log('invalid scene exception: %s - %s:%s' % ('%s:%s' % (tvid, prodid), cur_season, cur_exception), - logger.ERROR) + logger.error('invalid scene exception: %s - %s:%s' % ('%s:%s' % (tvid, prodid), cur_season, cur_exception)) continue exceptionsCache[(tvid, prodid)][cur_season].append(cur_exception) @@ -377,7 +376,7 @@ def _custom_exceptions_fetcher(): cnt_updated_numbers = 0 src_id = 'GHSG' - logger.log(u'Checking to update custom alternatives from %s' % src_id) + logger.log(f'Checking to update custom alternatives from {src_id}') dirpath = os.path.join(sickgear.CACHE_DIR, 'alts') tmppath = os.path.join(dirpath, 'tmp') @@ -399,7 +398,7 @@ def _custom_exceptions_fetcher(): rar_handle = rarfile.RarFile(file_rar) rar_handle.extractall(path=dirpath, pwd='sickgear_alt') except(BaseException, Exception) as e: - logger.log(u'Failed to unpack archive: %s with error: %s' % (file_rar, ex(e)), logger.ERROR) + logger.error(f'Failed to unpack archive: {file_rar} with error: {ex(e)}') if rar_handle: rar_handle.close() @@ -411,7 +410,7 @@ def _custom_exceptions_fetcher(): set_last_refresh(src_id) if not fetch_data and not os.path.isfile(file_cache): - logger.debug(u'Unable to fetch custom exceptions, skipped: %s' % file_rar) + logger.debug(f'Unable to fetch custom exceptions, skipped: {file_rar}') return custom_exception_dict, cnt_updated_numbers, should_refresh(src_id, iv, remaining=True) data = {} @@ -419,7 +418,7 @@ def _custom_exceptions_fetcher(): with io.open(file_cache) as fh: data = json_load(fh) except(BaseException, Exception) as e: - logger.log(u'Failed to unpack json data: %s with error: %s' % (file_rar, ex(e)), logger.ERROR) + logger.error(f'Failed to unpack json data: {file_rar} with error: {ex(e)}') # handle data from .scene_numbering import find_scene_numbering, set_scene_numbering_helper @@ -459,11 +458,9 @@ def _custom_exceptions_fetcher(): used.add((for_season, for_episode, target_season, target_episode)) if sn and ((for_season, for_episode) + sn) not in used \ and (for_season, for_episode) not in used: - logger.log( - u'Skipped setting "%s" episode %sx%s to target a release %sx%s because set to %sx%s' - % (show_obj.unique_name, for_season, for_episode, - target_season, target_episode, sn[0], sn[1]), - logger.DEBUG) + logger.debug(f'Skipped setting "{show_obj.unique_name}" episode {for_season}x{for_episode}' + f' to target a release {target_season}x{target_episode}' + f' because set to {sn[0]}x{sn[1]}') else: used.add((for_season, for_episode)) if not sn or sn != (target_season, target_episode): # not already set @@ -482,7 +479,7 @@ def _anidb_exceptions_fetcher(): global anidb_exception_dict if should_refresh('anidb'): - logger.log(u'Checking for AniDB scene exception updates') + logger.log('Checking for AniDB scene exception updates') for cur_show_obj in filter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList): try: anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True) @@ -506,15 +503,15 @@ def _xem_exceptions_fetcher(): if should_refresh(xem_list): for tvid in [i for i in sickgear.TVInfoAPI().sources if 'xem_origin' in 
sickgear.TVInfoAPI(i).config]: - logger.log(u'Checking for XEM scene exception updates for %s' % sickgear.TVInfoAPI(tvid).name) + logger.log(f'Checking for XEM scene exception updates for {sickgear.TVInfoAPI(tvid).name}') url = 'https://thexem.info/map/allNames?origin=%s%s&seasonNumbers=1'\ % (sickgear.TVInfoAPI(tvid).config['xem_origin'], ('&language=us', '')['xem' == xem_list]) parsed_json = helpers.get_url(url, parse_json=True, timeout=90) if not parsed_json: - logger.log(u'Check scene exceptions update failed for %s, Unable to get URL: %s' - % (sickgear.TVInfoAPI(tvid).name, url), logger.ERROR) + logger.error(f'Check scene exceptions update failed for {sickgear.TVInfoAPI(tvid).name},' + f' Unable to get URL: {url}') continue if 'failure' == parsed_json['result']: @@ -546,21 +543,20 @@ def _xem_get_ids(infosrc_name, xem_origin): url = 'https://thexem.info/map/havemap?origin=%s' % xem_origin task = 'Fetching show ids with%s xem scene mapping%s for origin' - logger.log(u'%s %s' % (task % ('', 's'), infosrc_name)) + logger.log(f'{task % ("", "s")} {infosrc_name}') parsed_json = helpers.get_url(url, parse_json=True, timeout=90) if not isinstance(parsed_json, dict) or not parsed_json: - logger.log(u'Failed %s %s, Unable to get URL: %s' - % (task.lower() % ('', 's'), infosrc_name, url), logger.ERROR) + logger.error(f'Failed {task.lower() % ("", "s")} {infosrc_name},' + f' Unable to get URL: {url}') else: if 'success' == parsed_json.get('result', '') and 'data' in parsed_json: xem_ids = list(set(filter(lambda prodid: 0 < prodid, map(lambda pid: helpers.try_int(pid), parsed_json['data'])))) if 0 == len(xem_ids): - logger.log(u'Failed %s %s, no data items parsed from URL: %s' - % (task.lower() % ('', 's'), infosrc_name, url), logger.WARNING) + logger.warning(f'Failed {task.lower() % ("", "s")} {infosrc_name},' + f' no data items parsed from URL: {url}') - logger.log(u'Finished %s %s' % (task.lower() % (' %s' % len(xem_ids), helpers.maybe_plural(xem_ids)), - infosrc_name)) + logger.log(f'Finished {task.lower() % (f" {len(xem_ids)}", helpers.maybe_plural(xem_ids))} {infosrc_name}') return xem_ids diff --git a/sickgear/scene_numbering.py b/sickgear/scene_numbering.py index cccb4abc..885fc527 100644 --- a/sickgear/scene_numbering.py +++ b/sickgear/scene_numbering.py @@ -799,7 +799,7 @@ def xem_refresh(tvid, prodid, force=False): refresh = True if refresh or force: - logger.log(u'Looking up XEM scene mapping for show %s on %s' % (prodid, tvinfo.name), logger.DEBUG) + logger.debug(f'Looking up XEM scene mapping for show {prodid} on {tvinfo.name}') # mark refreshed my_db.upsert('xem_refresh', @@ -809,7 +809,7 @@ def xem_refresh(tvid, prodid, force=False): try: parsed_json = sickgear.helpers.get_url(url, parse_json=True, timeout=90) if not parsed_json or '' == parsed_json: - logger.log(u'No XEM data for show %s on %s' % (prodid, tvinfo.name), logger.MESSAGE) + logger.log(f'No XEM data for show {prodid} on {tvinfo.name}', logger.MESSAGE) return if 'success' in parsed_json['result']: @@ -828,11 +828,10 @@ def xem_refresh(tvid, prodid, force=False): my_db = db.DBConnection() my_db.mass_action(cl) else: - logger.log(u'Empty lookup result - no XEM data for show %s on %s' % (prodid, tvinfo.name), logger.DEBUG) + logger.debug(f'Empty lookup result - no XEM data for show {prodid} on {tvinfo.name}') except (BaseException, Exception) as e: - logger.log(u'Exception refreshing XEM data for show ' + str(prodid) + ' on ' + tvinfo.name + ': ' + ex(e), - logger.WARNING) - logger.log(traceback.format_exc(), 
logger.ERROR) + logger.warning(f'Exception refreshing XEM data for show {str(prodid)} on {tvinfo.name}: {ex(e)}') + logger.error(traceback.format_exc()) def fix_xem_numbering(tvid, prodid): @@ -866,9 +865,7 @@ def fix_xem_numbering(tvid, prodid): update_scene_episode = False update_scene_absolute_number = False - logger.log( - u'Fixing any XEM scene mapping issues for show %s on %s' % (prodid, sickgear.TVInfoAPI(tvid).name), - logger.DEBUG) + logger.debug(f'Fixing any XEM scene mapping issues for show {prodid} on {sickgear.TVInfoAPI(tvid).name}') cl = [] for cur_row in sql_result: @@ -1001,15 +998,15 @@ def set_scene_numbering_helper(tvid, prodid, for_season=None, for_episode=None, if not show_obj.is_anime: scene_season = None if scene_season in [None, 'null', ''] else int(scene_season) scene_episode = None if scene_episode in [None, 'null', ''] else int(scene_episode) - action_log = u'Set episode scene numbering to %sx%s for episode %sx%s of "%s"' \ - % (scene_season, scene_episode, for_season, for_episode, show_obj.unique_name) + action_log = f'Set episode scene numbering to {scene_season}x{scene_episode}' \ + f' for episode {for_season}x{for_episode} of "{show_obj.unique_name}"' scene_args.update({'scene_season': scene_season, 'scene_episode': scene_episode}) result = {'forSeason': for_season, 'forEpisode': for_episode, 'sceneSeason': None, 'sceneEpisode': None} else: for_absolute = None if for_absolute in [None, 'null', ''] else int(for_absolute) scene_absolute = None if scene_absolute in [None, 'null', ''] else int(scene_absolute) - action_log = u'Set absolute scene numbering to %s for episode %sx%s of "%s"' \ - % (scene_absolute, for_season, for_episode, show_obj.unique_name) + action_log = f'Set absolute scene numbering to {scene_absolute}' \ + f' for episode {for_season}x{for_episode} of "{show_obj.unique_name}"' ep_args.update({'absolute': for_absolute}) scene_args.update({'absolute_number': for_absolute, 'scene_absolute': scene_absolute, 'anime': True}) result = {'forAbsolute': for_absolute, 'sceneAbsolute': None} @@ -1023,7 +1020,7 @@ def set_scene_numbering_helper(tvid, prodid, for_season=None, for_episode=None, result['success'] = None is not ep_obj and not isinstance(ep_obj, str) if result['success']: - logger.log(action_log, logger.DEBUG) + logger.debug(action_log) set_scene_numbering(**scene_args) show_obj.flush_episodes() if not show_obj.is_anime: diff --git a/sickgear/scheduler.py b/sickgear/scheduler.py index 990df34c..88bc2976 100644 --- a/sickgear/scheduler.py +++ b/sickgear/scheduler.py @@ -110,7 +110,7 @@ class Scheduler(threading.Thread): if should_run and ((self.prevent_cycle_run is not None and self.prevent_cycle_run()) or getattr(self.action, 'prevent_run', False)): - logger.log(u'%s skipping this cycle_time' % self.name, logger.WARNING) + logger.warning(f'{self.name} skipping this cycle_time') # set last_run to only check start_time after another cycle_time self.last_run = current_time should_run = False @@ -120,12 +120,12 @@ class Scheduler(threading.Thread): try: if not self.silent: - logger.log(u"Starting new thread: " + self.name, logger.DEBUG) + logger.debug(f'Starting new thread: {self.name}') self.action.run() except (BaseException, Exception) as e: - logger.log(u"Exception generated in thread " + self.name + ": " + ex(e), logger.ERROR) - logger.log(repr(traceback.format_exc()), logger.ERROR) + logger.error(f'Exception generated in thread {self.name}: {ex(e)}') + logger.error(repr(traceback.format_exc())) finally: if self.force: diff --git 
a/sickgear/search.py b/sickgear/search.py index d7c87fc1..67f3b22f 100644 --- a/sickgear/search.py +++ b/sickgear/search.py @@ -52,7 +52,7 @@ def _download_result(result): res_provider = result.provider if None is res_provider: - logger.log(u'Invalid provider name - this is a coding error, report it please', logger.ERROR) + logger.error('Invalid provider name - this is a coding error, report it please') return False # NZB files with a URL can just be downloaded from the provider @@ -62,9 +62,9 @@ def _download_result(result): elif 'nzbdata' == result.resultType: # get the final file path to the nzb - file_name = os.path.join(sickgear.NZB_DIR, u'%s.nzb' % result.name) + file_name = os.path.join(sickgear.NZB_DIR, f'{result.name}.nzb') - logger.log(u'Saving NZB to %s' % file_name) + logger.log(f'Saving NZB to {file_name}') new_result = True @@ -77,12 +77,12 @@ def _download_result(result): write_file(file_name, data, raise_exceptions=True) except (EnvironmentError, IOError) as e: - logger.log(u'Error trying to save NZB to black hole: %s' % ex(e), logger.ERROR) + logger.error(f'Error trying to save NZB to black hole: {ex(e)}') new_result = False elif 'torrent' == res_provider.providerType: new_result = res_provider.download_result(result) else: - logger.log(u'Invalid provider type - this is a coding error, report it please', logger.ERROR) + logger.error('Invalid provider type - this is a coding error, report it please') new_result = False return new_result @@ -120,7 +120,7 @@ def snatch_episode(result, end_status=SNATCHED): elif 'nzbget' == sickgear.NZB_METHOD: dl_result = nzbget.send_nzb(result) else: - logger.log(u'Unknown NZB action specified in config: %s' % sickgear.NZB_METHOD, logger.ERROR) + logger.error(f'Unknown NZB action specified in config: {sickgear.NZB_METHOD}') dl_result = False # TORRENT files can be sent to clients or saved to disk @@ -138,7 +138,7 @@ def snatch_episode(result, end_status=SNATCHED): if not result.content and not result.url.startswith('magnet'): result.content = result.provider.get_url(result.url, as_binary=True) if result.provider.should_skip() or not result.content: - logger.log(u'Torrent content failed to download from %s' % result.url, logger.ERROR) + logger.error(f'Torrent content failed to download from {result.url}') return False # Snatches torrent with client dl_result = clients.get_client_instance(sickgear.TORRENT_METHOD)().send_torrent(result) @@ -146,7 +146,7 @@ def snatch_episode(result, end_status=SNATCHED): if result.cache_filepath: helpers.remove_file_perm(result.cache_filepath) else: - logger.log(u'Unknown result type, unable to download it', logger.ERROR) + logger.error('Unknown result type, unable to download it') dl_result = False if not dl_result: @@ -155,7 +155,7 @@ def snatch_episode(result, end_status=SNATCHED): if sickgear.USE_FAILED_DOWNLOADS: failed_history.add_snatched(result) - ui.notifications.message(u'Episode snatched', result.name) + ui.notifications.message('Episode snatched', result.name) history.log_snatch(result) @@ -198,13 +198,13 @@ def pass_show_wordlist_checks(name, show_obj): result = show_name_helpers.contains_any(name, show_obj.rls_ignore_words, rx=show_obj.rls_ignore_words_regex, **re_extras) if None is not result and result: - logger.log(u'Ignored: %s for containing ignore word' % name) + logger.log(f'Ignored: {name} for containing ignore word') return False result = show_name_helpers.contains_any(name, show_obj.rls_require_words, rx=show_obj.rls_require_words_regex, **re_extras) if None is not result and not result: 
- logger.log(u'Ignored: %s for not containing any required word match' % name) + logger.log(f'Ignored: {name} for not containing any required word match') return False return True @@ -225,8 +225,8 @@ def pick_best_result( :param filter_rls: optional thread name :return: best search result """ - msg = (u'Picking the best result out of %s', u'Checking the best result %s')[1 == len(results)] - logger.log(msg % [x.name for x in results], logger.DEBUG) + msg = ('Picking the best result out of %s', 'Checking the best result %s')[1 == len(results)] + logger.debug(msg % [x.name for x in results]) # find the best result for the current episode best_result = None @@ -252,8 +252,8 @@ def pick_best_result( continue if quality_list and cur_result.quality not in quality_list: - logger.log(u'Rejecting unwanted quality %s for [%s]' % ( - Quality.qualityStrings[cur_result.quality], cur_result.name), logger.DEBUG) + logger.debug(f'Rejecting unwanted quality {Quality.qualityStrings[cur_result.quality]}' + f' for [{cur_result.name}]') continue if not pass_show_wordlist_checks(cur_result.name, show_obj): @@ -262,12 +262,12 @@ def pick_best_result( cur_size = getattr(cur_result, 'size', None) if sickgear.USE_FAILED_DOWNLOADS and None is not cur_size and failed_history.has_failed( cur_result.name, cur_size, cur_result.provider.name): - logger.log(u'Rejecting previously failed [%s]' % cur_result.name) + logger.log(f'Rejecting previously failed [{cur_result.name}]') continue if filter_rls and any([scene_only, non_scene_fallback, scene_rej_nuked, scene_nuked_active]): if show_obj.is_anime: - addendum = u'anime (skipping scene/nuke filter) ' + addendum = 'anime (skipping scene/nuke filter) ' else: scene_contains = False if scene_only and scene_or_contain: @@ -277,25 +277,23 @@ def pick_best_result( scene_contains = True if scene_contains and not scene_rej_nuked: - logger.log(u'Considering title match to \'or contain\' [%s]' % cur_result.name, logger.DEBUG) + logger.debug(f'Considering title match to \'or contain\' [{cur_result.name}]') reject = False else: reject, url = can_reject(cur_result.name) if reject: if isinstance(reject, string_types): if scene_rej_nuked and not scene_nuked_active: - logger.log(u'Rejecting nuked release. Nuke reason [%s] source [%s]' % (reject, url), - logger.DEBUG) + logger.debug(f'Rejecting nuked release. Nuke reason [{reject}] source [{url}]') elif scene_nuked_active: best_fallback_result = best_candidate(best_fallback_result, cur_result) else: - logger.log(u'Considering nuked release. Nuke reason [%s] source [%s]' % (reject, url), - logger.DEBUG) + logger.debug(f'Considering nuked release. 
Nuke reason [{reject}] source [{url}]') reject = False elif scene_contains or non_scene_fallback: best_fallback_result = best_candidate(best_fallback_result, cur_result) else: - logger.log(u'Rejecting as not scene release listed at any [%s]' % url, logger.DEBUG) + logger.debug(f'Rejecting as not scene release listed at any [{url}]') if reject: continue @@ -303,16 +301,16 @@ def pick_best_result( best_result = best_candidate(best_result, cur_result) if best_result and scene_only and not show_obj.is_anime: - addendum = u'scene release filtered ' + addendum = 'scene release filtered ' elif not best_result and best_fallback_result: - addendum = u'non scene release filtered ' + addendum = 'non scene release filtered ' best_result = best_fallback_result if best_result: - msg = (u'Picked as the best %s[%s]', u'Confirmed as the best %s[%s]')[1 == len(results)] - logger.log(msg % (addendum, best_result.name), logger.DEBUG) + msg = ('Picked as the best %s[%s]', 'Confirmed as the best %s[%s]')[1 == len(results)] + logger.debug(msg % (addendum, best_result.name)) else: - logger.log(u'No result picked.', logger.DEBUG) + logger.debug('No result picked.') return best_result @@ -326,7 +324,7 @@ def best_candidate(best_result, cur_result): :param cur_result: current best search result :return: new best search result """ - logger.log(u'Quality is %s for [%s]' % (Quality.qualityStrings[cur_result.quality], cur_result.name)) + logger.log(f'Quality is {Quality.qualityStrings[cur_result.quality]} for [{cur_result.name}]') if not best_result or best_result.quality < cur_result.quality != Quality.UNKNOWN: best_result = cur_result @@ -337,10 +335,10 @@ def best_candidate(best_result, cur_result): best_result = cur_result elif cur_result.properlevel == best_result.properlevel: if 'xvid' in best_result.name.lower() and 'x264' in cur_result.name.lower(): - logger.log(u'Preferring (x264 over xvid) [%s]' % cur_result.name) + logger.log(f'Preferring (x264 over xvid) [{cur_result.name}]') best_result = cur_result elif re.search('(?i)(h.?|x)264', best_result.name) and re.search('(?i)((h.?|x)265|hevc)', cur_result.name): - logger.log(u'Preferring (x265 over x264) [%s]' % cur_result.name) + logger.log(f'Preferring (x265 over x264) [{cur_result.name}]') best_result = cur_result elif 'internal' in best_result.name.lower() and 'internal' not in cur_result.name.lower(): best_result = cur_result @@ -358,7 +356,7 @@ def is_final_result(result): returns True, if not then it's False """ - logger.log(u'Checking if searching should continue after finding %s' % result.name, logger.DEBUG) + logger.debug(f'Checking if searching should continue after finding {result.name}') show_obj = result.ep_obj_list[0].show_obj @@ -399,8 +397,7 @@ def is_first_best_match(ep_status, result): :return: """ - logger.log(u'Checking if the first best quality match should be archived for episode %s' % - result.name, logger.DEBUG) + logger.debug(f'Checking if the first best quality match should be archived for episode {result.name}') show_obj = result.ep_obj_list[0].show_obj cur_status, cur_quality = Quality.split_composite_status(ep_status) @@ -570,7 +567,7 @@ def wanted_episodes(show_obj, # type: TVShow ['%d unaired episode%s', total_unaired]: if 0 < total: actions.append(msg % (total, helpers.maybe_plural(total))) - logger.log(u'We want %s for %s' % (' and '.join(actions), show_obj.unique_name)) + logger.log(f'We want {" and ".join(actions)} for {show_obj.unique_name}') return wanted @@ -602,8 +599,8 @@ def search_for_needed_episodes(ep_obj_list): for 
cur_ep_obj in ep_obj_search_result_list: if cur_ep_obj.show_obj.paused: - logger.debug(u'Show %s is paused, ignoring all RSS items for %s' % - (cur_ep_obj.show_obj.unique_name, cur_ep_obj.pretty_name())) + logger.debug(f'Show {cur_ep_obj.show_obj.unique_name} is paused,' + f' ignoring all RSS items for {cur_ep_obj.pretty_name()}') continue # find the best result for the current episode @@ -612,7 +609,7 @@ def search_for_needed_episodes(ep_obj_list): # if all results were rejected move on to the next episode if not best_result: - logger.log(u'All found results for %s were rejected.' % cur_ep_obj.pretty_name(), logger.DEBUG) + logger.debug(f'All found results for {cur_ep_obj.pretty_name()} were rejected.') continue # if it's already in the list (from another provider) and the newly found quality is no better, then skip it @@ -639,11 +636,10 @@ def search_for_needed_episodes(ep_obj_list): threading.current_thread().name = orig_thread_name if not len(providers): - logger.log('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes', - logger.WARNING) + logger.warning('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes') elif not search_done: - logger.log('Failed recent search of %s enabled provider%s. More info in debug log.' % ( - len(providers), helpers.maybe_plural(providers)), logger.ERROR) + logger.error(f'Failed recent search of {len(providers)} enabled provider{helpers.maybe_plural(providers)}.' + f' More info in debug log.') return list(found_results.values()) @@ -713,9 +709,9 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m search_count += 1 if 'eponly' == search_mode: - logger.log(u'Performing episode search for %s' % show_obj.unique_name) + logger.log(f'Performing episode search for {show_obj.unique_name}') else: - logger.log(u'Performing season pack search for %s' % show_obj.unique_name) + logger.log(f'Performing season pack search for {show_obj.unique_name}') try: provider.cache.clear_cache() @@ -726,10 +722,10 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m len(v), (('multiep', 'season')[SEASON_RESULT == k], 'episode')['ep' in search_mode], helpers.maybe_plural(v)) for (k, v) in iteritems(search_result_list)])) except exceptions_helper.AuthException as e: - logger.error(u'Authentication error: %s' % ex(e)) + logger.error(f'Authentication error: {ex(e)}') break except (BaseException, Exception) as e: - logger.error(u'Error while searching %s, skipping: %s' % (provider.name, ex(e))) + logger.error(f'Error while searching {provider.name}, skipping: {ex(e)}') logger.error(traceback.format_exc()) break @@ -752,7 +748,7 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m break search_mode = '%sonly' % ('ep', 'sp')['ep' in search_mode] - logger.log(u'Falling back to %s search ...' 
% ('season pack', 'episode')['ep' in search_mode]) + logger.log(f'Falling back to {("season pack", "episode")["ep" in search_mode]} search ...') if not provider_results: logger.log('No suitable result at [%s]' % provider.name) @@ -804,7 +800,7 @@ def cache_torrent_file( torrent_name = search_result.provider.regulate_cache_torrent_file(torrent_name) if not pick_best_result([search_result], show_obj, **kwargs) or \ not show_name_helpers.pass_wordlist_checks(torrent_name, indexer_lookup=False, show_obj=show_obj): - logger.log(u'Ignored %s that contains %s (debug log has detail)' % (result_name, torrent_name)) + logger.log(f'Ignored {result_name} that contains {torrent_name} (debug log has detail)') return return search_result @@ -848,7 +844,7 @@ def search_providers( # create a thread for each provider to search for cur_provider in provider_list: if cur_provider.anime_only and not show_obj.is_anime: - logger.debug(u'%s is not an anime, skipping' % show_obj.unique_name) + logger.debug(f'{show_obj.unique_name} is not an anime, skipping') continue provider_id = cur_provider.get_id() @@ -891,14 +887,14 @@ def search_providers( for cur_result in found_results[provider_id][cur_episode]: if Quality.UNKNOWN != cur_result.quality and highest_quality_overall < cur_result.quality: highest_quality_overall = cur_result.quality - logger.debug(u'%s is the highest quality of any match' % Quality.qualityStrings[highest_quality_overall]) + logger.debug(f'{Quality.qualityStrings[highest_quality_overall]} is the highest quality of any match') # see if every episode is wanted if best_season_result: # get the quality of the season nzb season_qual = best_season_result.quality - logger.log(u'%s is the quality of the season %s' % (Quality.qualityStrings[season_qual], - best_season_result.provider.providerType), logger.DEBUG) + logger.debug(f'{Quality.qualityStrings[season_qual]} is the quality of the season' + f' {best_season_result.provider.providerType}') my_db = db.DBConnection() sql = 'SELECT season, episode' \ @@ -907,8 +903,8 @@ def search_providers( (show_obj.tvid, show_obj.prodid, ','.join([str(x.season) for x in ep_obj_list])) ep_nums = [(int(x['season']), int(x['episode'])) for x in my_db.select(sql)] - logger.log(u'Executed query: [%s]' % sql) - logger.log(u'Episode list: %s' % ep_nums, logger.DEBUG) + logger.log(f'Executed query: [{sql}]') + logger.debug(f'Episode list: {ep_nums}') all_wanted = True any_wanted = False @@ -921,8 +917,8 @@ def search_providers( # if we need every ep in the season and there's nothing better, # then download this and be done with it (unless single episodes are preferred) if all_wanted and highest_quality_overall == best_season_result.quality: - logger.log(u'Every episode in this season is needed, downloading the whole %s %s' % - (best_season_result.provider.providerType, best_season_result.name)) + logger.log(f'Every episode in this season is needed, downloading the whole' + f' {best_season_result.provider.providerType} {best_season_result.name}') ep_obj_list = [] for ep_num in ep_nums: ep_obj_list.append(show_obj.get_episode(ep_num[0], ep_num[1])) @@ -931,11 +927,11 @@ def search_providers( return [best_season_result] elif not any_wanted: - logger.log(u'No episodes from this season are wanted at this quality, ignoring the result of ' + - best_season_result.name, logger.DEBUG) + logger.debug(f'No episodes from this season are wanted at this quality,' + f' ignoring the result of {best_season_result.name}') else: if GenericProvider.NZB == 
best_season_result.provider.providerType: - logger.log(u'Breaking apart the NZB and adding the individual ones to our results', logger.DEBUG) + logger.debug('Breaking apart the NZB and adding the individual ones to our results') # if not, break it apart and add them as the lowest priority results individual_results = nzbSplitter.split_result(best_season_result) @@ -959,8 +955,8 @@ def search_providers( else: # Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it - logger.log(u'Adding multi episode result for full season torrent. In your torrent client, set ' + - u'the episodes that you do not want to "don\'t download"') + logger.log('Adding multi episode result for full season torrent. In your torrent client,' + ' set the episodes that you do not want to "don\'t download"') ep_obj_list = [] for ep_num in ep_nums: ep_obj_list.append(show_obj.get_episode(ep_num[0], ep_num[1])) @@ -982,11 +978,11 @@ def search_providers( if MULTI_EP_RESULT in found_results[provider_id]: for multi_result in found_results[provider_id][MULTI_EP_RESULT]: - logger.log(u'Checking usefulness of multi episode result [%s]' % multi_result.name, logger.DEBUG) + logger.debug(f'Checking usefulness of multi episode result [{multi_result.name}]') if sickgear.USE_FAILED_DOWNLOADS and failed_history.has_failed(multi_result.name, multi_result.size, multi_result.provider.name): - logger.log(u'Rejecting previously failed multi episode result [%s]' % multi_result.name) + logger.log(f'Rejecting previously failed multi episode result [{multi_result.name}]') continue # see how many of the eps that this result covers aren't covered by single results @@ -1000,12 +996,12 @@ def search_providers( else: not_needed_eps.append(ep_num) - logger.log(u'Single episode check result is... needed episodes: %s, not needed episodes: %s' % - (needed_eps, not_needed_eps), logger.DEBUG) + logger.debug(f'Single episode check result is... needed episodes: {needed_eps},' + f' not needed episodes: {not_needed_eps}') if not not_needed_eps: - logger.log(u'All of these episodes were covered by single episode results, ' + - 'ignoring this multi episode result', logger.DEBUG) + logger.debug('All of these episodes were covered by single episode results,' + ' ignoring this multi episode result') continue # check if these eps are already covered by another multi-result @@ -1018,13 +1014,13 @@ def search_providers( else: multi_needed_eps.append(ep_num) - logger.log(u'Multi episode check result is... multi needed episodes: ' + - '%s, multi not needed episodes: %s' % (multi_needed_eps, multi_not_needed_eps), logger.DEBUG) + logger.debug(f'Multi episode check result is...' 
+ f' multi needed episodes: {multi_needed_eps},' + f' multi not needed episodes: {multi_not_needed_eps}') if not multi_needed_eps: - logger.log(u'All of these episodes were covered by another multi episode nzb, ' + - 'ignoring this multi episode result', - logger.DEBUG) + logger.debug('All of these episodes were covered by another multi episode nzb,' + ' ignoring this multi episode result') continue # if we're keeping this multi-result then remember it @@ -1035,8 +1031,8 @@ def search_providers( for ep_obj in multi_result.ep_obj_list: ep_num = ep_obj.episode if ep_num in found_results[provider_id]: - logger.log(u'A needed multi episode result overlaps with a single episode result for episode ' + - '#%s, removing the single episode results from the list' % ep_num, logger.DEBUG) + logger.debug(f'A needed multi episode result overlaps with a single episode result' + f' for episode #{ep_num}, removing the single episode results from the list') del found_results[provider_id][ep_num] # of all the single ep results narrow it down to the best one for each episode @@ -1119,8 +1115,8 @@ def search_providers( if not len(provider_list): logger.warning('No NZB/Torrent providers in Media Providers/Options are allowed for active searching') elif not search_done: - logger.log('Failed active search of %s enabled provider%s. More info in debug log.' % ( - len(provider_list), helpers.maybe_plural(provider_list)), logger.ERROR) + logger.error(f'Failed active search of {len(provider_list)}' + f' enabled provider{helpers.maybe_plural(provider_list)}. More info in debug log.') elif not any(final_results): logger.log('No suitable candidates') diff --git a/sickgear/search_backlog.py b/sickgear/search_backlog.py index fa603986..e2936f52 100644 --- a/sickgear/search_backlog.py +++ b/sickgear/search_backlog.py @@ -105,7 +105,7 @@ class BacklogSearcher(object): def am_running(self): # type: (...) 
-> bool - logger.log(u'amWaiting: ' + str(self.amWaiting) + ', amActive: ' + str(self.amActive), logger.DEBUG) + logger.debug(f'amWaiting: {self.amWaiting}, amActive: {self.amActive}') return (not self.amWaiting) and self.amActive def add_backlog_item(self, @@ -197,7 +197,7 @@ class BacklogSearcher(object): :rtype: None """ if self.amActive and not which_shows: - logger.log(u'Backlog is still running, not starting it again', logger.DEBUG) + logger.debug('Backlog is still running, not starting it again') return if which_shows: @@ -216,12 +216,12 @@ class BacklogSearcher(object): and GenericProvider.TORRENT == x.providerType, sickgear.providers.sorted_sources())) if not any_torrent_enabled: - logger.log('Last scheduled backlog run was within the last day, skipping this run.', logger.DEBUG) + logger.debug('Last scheduled backlog run was within the last day, skipping this run.') return if not self.providers_active(any_torrent_enabled, standard_backlog): - logger.log('No NZB/Torrent provider has active searching enabled in config/Media Providers,' - ' cannot start backlog.', logger.WARNING) + logger.warning('No NZB/Torrent provider has active searching enabled in config/Media Providers,' + ' cannot start backlog.') return self._get_last_backlog() @@ -234,14 +234,14 @@ class BacklogSearcher(object): limited_backlog = False if standard_backlog and (any_torrent_enabled or sickgear.BACKLOG_NOFULL): - logger.log(u'Running limited backlog for episodes missed during the last %s day(s)' % - str(sickgear.BACKLOG_LIMITED_PERIOD)) + logger.log(f'Running limited backlog for episodes missed during the last' + f' {sickgear.BACKLOG_LIMITED_PERIOD} day(s)') from_date = limited_from_date limited_backlog = True runparts = [] if standard_backlog and not any_torrent_enabled and sickgear.BACKLOG_NOFULL: - logger.log(u'Skipping automated full backlog search because it is disabled in search settings') + logger.log('Skipping automated full backlog search because it is disabled in search settings') my_db = db.DBConnection('cache.db') if standard_backlog and not any_torrent_enabled and not sickgear.BACKLOG_NOFULL: @@ -333,7 +333,7 @@ class BacklogSearcher(object): @staticmethod def _get_last_runtime(): - logger.log('Retrieving the last runtime of Backlog from the DB', logger.DEBUG) + logger.debug('Retrieving the last runtime of Backlog from the DB') my_db = db.DBConnection() sql_result = my_db.select('SELECT * FROM info') @@ -350,7 +350,7 @@ class BacklogSearcher(object): return last_run_time def _set_last_runtime(self, when): - logger.log('Setting the last backlog runtime in the DB to %s' % when, logger.DEBUG) + logger.debug('Setting the last backlog runtime in the DB to %s' % when) my_db = db.DBConnection() sql_result = my_db.select('SELECT * FROM info') @@ -369,7 +369,7 @@ class BacklogSearcher(object): def _get_last_backlog(self): - logger.log('Retrieving the last check time from the DB', logger.DEBUG) + logger.debug('Retrieving the last check time from the DB') my_db = db.DBConnection() sql_result = my_db.select('SELECT * FROM info') @@ -389,7 +389,7 @@ class BacklogSearcher(object): @staticmethod def _set_last_backlog(when): - logger.log('Setting the last backlog in the DB to %s' % when, logger.DEBUG) + logger.debug('Setting the last backlog in the DB to %s' % when) my_db = db.DBConnection() sql_result = my_db.select('SELECT * FROM info') diff --git a/sickgear/search_queue.py b/sickgear/search_queue.py index 88d430d2..200910b9 100644 --- a/sickgear/search_queue.py +++ b/sickgear/search_queue.py @@ -83,7 +83,7 @@ 
class SearchQueue(generic_queue.GenericQueue): continue self.add_item(item, add_to_db=False) except (BaseException, Exception) as e: - logger.log('Exception loading queue %s: %s' % (self.__class__.__name__, ex(e)), logger.ERROR) + logger.error('Exception loading queue %s: %s' % (self.__class__.__name__, ex(e))) def _clear_sql(self): return [ @@ -322,7 +322,7 @@ class SearchQueue(generic_queue.GenericQueue): # manual and failed searches generic_queue.GenericQueue.add_item(self, item, add_to_db=add_to_db) else: - logger.log(u'Not adding item, it\'s already in the queue', logger.DEBUG) + logger.debug("Not adding item, it's already in the queue") class RecentSearchQueueItem(generic_queue.QueueItem): @@ -367,24 +367,23 @@ class RecentSearchQueueItem(generic_queue.QueueItem): self._check_for_propers(needed) if not self.ep_obj_list: - logger.log(u'No search of cache for episodes required') + logger.log('No search of cache for episodes required') self.success = True else: num_shows = len(set([ep_obj.show_obj.name for ep_obj in self.ep_obj_list])) - logger.log(u'Found %d needed episode%s spanning %d show%s' - % (len(self.ep_obj_list), helpers.maybe_plural(self.ep_obj_list), - num_shows, helpers.maybe_plural(num_shows))) + logger.log(f'Found {len(self.ep_obj_list):d} needed episode{helpers.maybe_plural(self.ep_obj_list)}' + f' spanning {num_shows:d} show{helpers.maybe_plural(num_shows)}') try: - logger.log(u'Beginning recent search for episodes') + logger.log('Beginning recent search for episodes') # noinspection PyTypeChecker search_results = search.search_for_needed_episodes(self.ep_obj_list) if not len(search_results): - logger.log(u'No needed episodes found') + logger.log('No needed episodes found') else: for result in search_results: - logger.log(u'Downloading %s from %s' % (result.name, result.provider.name)) + logger.log(f'Downloading {result.name} from {result.provider.name}') self.success = search.snatch_episode(result) if self.success: for ep_obj in result.ep_obj_list: @@ -399,7 +398,7 @@ class RecentSearchQueueItem(generic_queue.QueueItem): helpers.cpu_sleep() except (BaseException, Exception): - logger.log(traceback.format_exc(), logger.ERROR) + logger.error(traceback.format_exc()) if None is self.success: self.success = False @@ -497,13 +496,13 @@ class RecentSearchQueueItem(generic_queue.QueueItem): wanted |= (False, True)[common.WANTED == ep_obj.status] if not wanted: - logger.log(u'No unaired episodes marked wanted') + logger.log('No unaired episodes marked wanted') if 0 < len(sql_l): my_db = db.DBConnection() my_db.mass_action(sql_l) if wanted: - logger.log(u'Found new episodes marked wanted') + logger.log('Found new episodes marked wanted') @staticmethod def update_providers(needed=common.NeededQualities(need_all=True)): @@ -533,8 +532,7 @@ class RecentSearchQueueItem(generic_queue.QueueItem): threads[-1].start() if not len(providers): - logger.log('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes', - logger.WARNING) + logger.warning('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes') if threads: # wait for all threads to finish @@ -637,7 +635,7 @@ class ManualSearchQueueItem(BaseSearchQueueItem): generic_queue.QueueItem.run(self) try: - logger.log(u'Beginning manual search for: [%s]' % self.segment.pretty_name()) + logger.log(f'Beginning manual search for: [{self.segment.pretty_name()}]') self.started = True ep_count, ep_count_scene = get_aired_in_season(self.show_obj) @@ -656,7 +654,7 @@ class 
ManualSearchQueueItem(BaseSearchQueueItem): if search_result: for result in search_result: # type: sickgear.classes.NZBSearchResult - logger.log(u'Downloading %s from %s' % (result.name, result.provider.name)) + logger.log(f'Downloading {result.name} from {result.provider.name}') self.success = search.snatch_episode(result) for ep_obj in result.ep_obj_list: # type: sickgear.tv.TVEpisode self.snatched_eps.add(SimpleNamespace(tvid_prodid=ep_obj.show_obj.tvid_prodid, @@ -673,12 +671,12 @@ class ManualSearchQueueItem(BaseSearchQueueItem): break else: ui.notifications.message('No downloads found', - u'Could not find a download for %s' % self.segment.pretty_name()) + f'Could not find a download for {self.segment.pretty_name()}') - logger.log(u'Unable to find a download for: [%s]' % self.segment.pretty_name()) + logger.log(f'Unable to find a download for: [{self.segment.pretty_name()}]') except (BaseException, Exception): - logger.log(traceback.format_exc(), logger.ERROR) + logger.error(traceback.format_exc()) finally: # Keep a list with the last executed searches @@ -729,7 +727,7 @@ class BacklogQueueItem(BaseSearchQueueItem): for ep_obj in self.segment: # type: sickgear.tv.TVEpisode set_wanted_aired(ep_obj, True, ep_count, ep_count_scene) - logger.log(u'Beginning backlog search for: [%s]' % self.show_obj.unique_name) + logger.log(f'Beginning backlog search for: [{self.show_obj.unique_name}]') search_result = search.search_providers( self.show_obj, self.segment, False, try_other_searches=(not self.standard_backlog or not self.limited_backlog), @@ -737,7 +735,7 @@ class BacklogQueueItem(BaseSearchQueueItem): if search_result: for result in search_result: # type: sickgear.classes.NZBSearchResult - logger.log(u'Downloading %s from %s' % (result.name, result.provider.name)) + logger.log(f'Downloading {result.name} from {result.provider.name}') if search.snatch_episode(result): for ep_obj in result.ep_obj_list: # type: sickgear.tv.TVEpisode self.snatched_eps.add(SimpleNamespace(tvid_prodid=ep_obj.show_obj.tvid_prodid, @@ -750,10 +748,10 @@ class BacklogQueueItem(BaseSearchQueueItem): helpers.cpu_sleep() else: - logger.log(u'No needed episodes found during backlog search for: [%s]' % self.show_obj.unique_name) + logger.log(f'No needed episodes found during backlog search for: [{self.show_obj.unique_name}]') except (BaseException, Exception): is_error = True - logger.log(traceback.format_exc(), logger.ERROR) + logger.error(traceback.format_exc()) finally: logger.log('Completed backlog search %sfor: [%s]' @@ -783,7 +781,7 @@ class FailedQueueItem(BaseSearchQueueItem): ep_count, ep_count_scene = get_aired_in_season(self.show_obj) for ep_obj in self.segment: # type: sickgear.tv.TVEpisode - logger.log(u'Marking episode as bad: [%s]' % ep_obj.pretty_name()) + logger.log(f'Marking episode as bad: [{ep_obj.pretty_name()}]') failed_history.set_episode_failed(ep_obj) (release, provider) = failed_history.find_release(ep_obj) @@ -792,14 +790,14 @@ class FailedQueueItem(BaseSearchQueueItem): failed_history.add_failed(release) history.log_failed(ep_obj, release, provider) - logger.log(u'Beginning failed download search for: [%s]' % ep_obj.pretty_name()) + logger.log(f'Beginning failed download search for: [{ep_obj.pretty_name()}]') set_wanted_aired(ep_obj, True, ep_count, ep_count_scene, manual=True) search_result = search.search_providers(self.show_obj, self.segment, True, try_other_searches=True) or [] for result in search_result: # type: sickgear.classes.NZBSearchResult - logger.log(u'Downloading %s from %s' % 
(result.name, result.provider.name)) + logger.log(f'Downloading {result.name} from {result.provider.name}') if search.snatch_episode(result): for ep_obj in result.ep_obj_list: # type: sickgear.tv.TVEpisode self.snatched_eps.add(SimpleNamespace(tvid_prodid=ep_obj.show_obj.tvid_prodid, @@ -813,9 +811,9 @@ class FailedQueueItem(BaseSearchQueueItem): helpers.cpu_sleep() else: pass - # logger.log(u'No valid episode found to retry for: [%s]' % self.segment.pretty_name()) + # logger.log(f'No valid episode found to retry for: [{self.segment.pretty_name()}]') except (BaseException, Exception): - logger.log(traceback.format_exc(), logger.ERROR) + logger.error(traceback.format_exc()) finally: # Keep a list with the last executed searches diff --git a/sickgear/sgdatetime.py b/sickgear/sgdatetime.py index f963c76d..1e6ffaf0 100644 --- a/sickgear/sgdatetime.py +++ b/sickgear/sgdatetime.py @@ -211,7 +211,7 @@ class SGDatetime(datetime.datetime): obj = (dt, self)[self is not None] # type: datetime.datetime try: if None is not obj: - strd = u'%s, %s' % ( + strd = '%s, %s' % ( SGDatetime.sbstrftime(obj, (sickgear.DATE_PRESET, d_preset)[None is not d_preset]), SGDatetime.sbftime(dt, show_seconds, t_preset, False, markup)) diff --git a/sickgear/show_name_helpers.py b/sickgear/show_name_helpers.py index 0ee26627..f688c1d5 100644 --- a/sickgear/show_name_helpers.py +++ b/sickgear/show_name_helpers.py @@ -62,14 +62,14 @@ def pass_wordlist_checks(name, # type: AnyStr """ if parse: - err_msg = u'Unable to parse the filename %s into a valid ' % name + err_msg = f'Unable to parse the filename {name} into a valid ' try: NameParser(indexer_lookup=indexer_lookup).parse(name) except InvalidNameException: - logger.log(err_msg + 'episode', logger.DEBUG) + logger.debug(err_msg + 'episode') return False except InvalidShowException: - logger.log(err_msg + 'show', logger.DEBUG) + logger.debug(err_msg + 'show') return False word_list = {'sub(bed|ed|pack|s)', '(dk|fin|heb|kor|nor|nordic|pl|swe)sub(bed|ed|s)?', @@ -94,7 +94,7 @@ def pass_wordlist_checks(name, # type: AnyStr result = result or contains_any(name, word_list, rx=sickgear.IGNORE_WORDS_REGEX) if None is not result and result: - logger.log(u'Ignored: %s for containing ignore word' % name, logger.DEBUG) + logger.debug(f'Ignored: {name} for containing ignore word') return False result = None @@ -108,7 +108,7 @@ def pass_wordlist_checks(name, # type: AnyStr # if any of the good strings aren't in the name then say no result = result or not_contains_any(name, req_word_list, rx=sickgear.REQUIRE_WORDS_REGEX) if None is not result and result: - logger.log(u'Ignored: %s for not containing required word match' % name, logger.DEBUG) + logger.debug(f'Ignored: {name} for not containing required word match') return False return True @@ -160,7 +160,7 @@ def contains_any(subject, # type: AnyStr if (match and not invert) or (not match and invert): msg = match and not invert and 'Found match' or '' msg = not match and invert and 'No match found' or msg - logger.log(u'%s from pattern: %s in text: %s ' % (msg, rc_filter.pattern, subject), logger.DEBUG) + logger.debug(f'{msg} from pattern: {rc_filter.pattern} in text: {subject} ') return True return False return None @@ -190,13 +190,11 @@ def compile_word_list(lookup_words, # type: Union[AnyStr, Set[AnyStr]] subject = search_raw and re.escape(word) or re.sub(r'([\" \'])', r'\\\1', word) result.append(re.compile('(?i)%s%s%s' % (re_prefix, subject, re_suffix))) except re.error as e: - logger.log(u'Failure to compile filter expression: %s ... 
Reason: %s' % (word, ex(e)), - logger.DEBUG) + logger.debug(f'Failure to compile filter expression: {word} ... Reason: {ex(e)}') diff = len(lookup_words) - len(result) if diff: - logger.log(u'From %s expressions, %s was discarded during compilation' % (len(lookup_words), diff), - logger.DEBUG) + logger.debug(f'From {len(lookup_words)} expressions, {diff} was discarded during compilation') return result @@ -430,7 +428,7 @@ def determine_release_name(dir_name=None, nzb_name=None): """ if None is not nzb_name: - logger.log(u'Using nzb name for release name.') + logger.log('Using nzb name for release name.') return nzb_name.rpartition('.')[0] if not dir_name or not os.path.isdir(dir_name): @@ -446,7 +444,7 @@ def determine_release_name(dir_name=None, nzb_name=None): if 1 == len(results): found_file = results[0].rpartition('.')[0] if pass_wordlist_checks(found_file): - logger.log(u'Release name (%s) found from file (%s)' % (found_file, results[0])) + logger.log(f'Release name ({found_file}) found from file ({results[0]})') return found_file.rpartition('.')[0] # If that fails, we try the folder @@ -455,7 +453,7 @@ def determine_release_name(dir_name=None, nzb_name=None): # NOTE: Multiple failed downloads will change the folder name. # (e.g., appending #s) # Should we handle that? - logger.log(u'Folder name (%s) appears to be a valid release name. Using it.' % folder) + logger.log(f'Folder name ({folder}) appears to be a valid release name. Using it.') return folder return None diff --git a/sickgear/show_queue.py b/sickgear/show_queue.py index 03046c93..e083bb40 100644 --- a/sickgear/show_queue.py +++ b/sickgear/show_queue.py @@ -126,7 +126,7 @@ class ShowQueue(generic_queue.GenericQueue): lang=cur_row['lang'], uid=cur_row['uid'], add_to_db=False) except (BaseException, Exception) as e: - logger.log('Exception loading queue %s: %s' % (self.__class__.__name__, ex(e)), logger.ERROR) + logger.error('Exception loading queue %s: %s' % (self.__class__.__name__, ex(e))) def save_item(self, item): # type: (ShowQueueItem) -> None @@ -223,7 +223,7 @@ class ShowQueue(generic_queue.GenericQueue): else: my_db.action('DELETE FROM tv_src_switch WHERE uid = ?', [item.uid]) except (BaseException, Exception) as e: - logger.log('Exception deleting item %s from db: %s' % (item, ex(e)), logger.ERROR) + logger.error('Exception deleting item %s from db: %s' % (item, ex(e))) else: generic_queue.GenericQueue.delete_item(self, item) @@ -544,8 +544,8 @@ class ShowQueue(generic_queue.GenericQueue): if ((not after_update and self.is_being_updated(show_obj)) or self.is_in_update_queue(show_obj)) and not force: - logger.log('Skipping this refresh as there is already an update queued or' - ' in progress and a refresh is done at the end of an update anyway.', logger.DEBUG) + logger.debug('Skipping this refresh as there is already an update queued or' + ' in progress and a refresh is done at the end of an update anyway.') return if self.is_show_being_switched(show_obj): @@ -976,22 +976,22 @@ class QueueItemAdd(ShowQueueItem): if self.lang: tvinfo_config['language'] = self.lang - logger.log(u'' + str(sickgear.TVInfoAPI(self.tvid).name) + ': ' + repr(tvinfo_config)) + logger.log(f'{sickgear.TVInfoAPI(self.tvid).name}: {repr(tvinfo_config)}') t = sickgear.TVInfoAPI(self.tvid).setup(**tvinfo_config) s = t.get_show(self.prodid, load_episodes=False, language=self.lang) if getattr(t, 'show_not_found', False): - logger.log('Show %s was not found on %s, maybe show was deleted' % - (self.show_name, sickgear.TVInfoAPI(self.tvid).name), 
logger.ERROR) + logger.error(f'Show {self.show_name} was not found on {sickgear.TVInfoAPI(self.tvid).name},' + f' maybe show was deleted') self._finish_early() return # this usually only happens if they have an NFO in their show dir # which gave us a TV info source ID that has no proper english version of the show if None is getattr(s, 'seriesname', None): - logger.log('Show in %s has no name on %s, probably the wrong language used to search with.' % - (self.showDir, sickgear.TVInfoAPI(self.tvid).name), logger.ERROR) + logger.error(f'Show in {self.showDir} has no name on {sickgear.TVInfoAPI(self.tvid).name},' + f' probably the wrong language used to search with.') ui.notifications.error('Unable to add show', 'Show in %s has no name on %s, probably the wrong language.' ' Delete .nfo and add manually in the correct language.' % @@ -999,8 +999,7 @@ class QueueItemAdd(ShowQueueItem): self._finish_early() return except (BaseException, Exception): - logger.log('Unable to find show ID:%s on TV info: %s' % (self.prodid, sickgear.TVInfoAPI(self.tvid).name), - logger.ERROR) + logger.error('Unable to find show ID:%s on TV info: %s' % (self.prodid, sickgear.TVInfoAPI(self.tvid).name)) ui.notifications.error('Unable to add show', 'Unable to look up the show in %s on %s using ID %s, not using the NFO.' ' Delete .nfo and try adding manually again.' % @@ -1046,9 +1045,7 @@ class QueueItemAdd(ShowQueueItem): self.show_obj.sports = 1 except BaseTVinfoException as e: - logger.log( - 'Unable to add show due to an error with %s: %s' % (sickgear.TVInfoAPI(self.tvid).name, ex(e)), - logger.ERROR) + logger.error(f'Unable to add show due to an error with {sickgear.TVInfoAPI(self.tvid).name}: {ex(e)}') if self.show_obj: ui.notifications.error('Unable to add %s due to an error with %s' % (self.show_obj.unique_name, sickgear.TVInfoAPI(self.tvid).name)) @@ -1059,14 +1056,14 @@ class QueueItemAdd(ShowQueueItem): return except exceptions_helper.MultipleShowObjectsException: - logger.log('The show in %s is already in your show list, skipping' % self.showDir, logger.ERROR) + logger.error('The show in %s is already in your show list, skipping' % self.showDir) ui.notifications.error('Show skipped', 'The show in %s is already in your show list' % self.showDir) self._finish_early() return except (BaseException, Exception) as e: - logger.log('Error trying to add show: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error trying to add show: %s' % ex(e)) + logger.error(traceback.format_exc()) self._finish_early() raise @@ -1075,8 +1072,8 @@ class QueueItemAdd(ShowQueueItem): try: self.show_obj.save_to_db() except (BaseException, Exception) as e: - logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error saving the show to the database: %s' % ex(e)) + logger.error(traceback.format_exc()) self._finish_early() raise @@ -1092,16 +1089,15 @@ class QueueItemAdd(ShowQueueItem): self.show_obj.load_episodes_from_tvinfo(tvinfo_data=(None, result)[ self.show_obj.prodid == getattr(result, 'id', None)]) except (BaseException, Exception) as e: - logger.log( - 'Error with %s, not creating episode list: %s' % (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e)), - logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error(f'Error with {sickgear.TVInfoAPI(self.show_obj.tvid).name},' + f' not creating episode list: {ex(e)}') + logger.error(traceback.format_exc()) try: 
self.show_obj.load_episodes_from_dir() except (BaseException, Exception) as e: - logger.log('Error searching directory for episodes: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error searching directory for episodes: %s' % ex(e)) + logger.error(traceback.format_exc()) # if they gave a custom status then change all the eps to it my_db = db.DBConnection() @@ -1149,8 +1145,8 @@ class QueueItemAdd(ShowQueueItem): try: self.show_obj.save_to_db() except (BaseException, Exception) as e: - logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error saving the show to the database: %s' % ex(e)) + logger.error(traceback.format_exc()) self._finish_early() raise @@ -1288,8 +1284,7 @@ class QueueItemRename(ShowQueueItem): try: _ = self.show_obj.location except exceptions_helper.ShowDirNotFoundException: - logger.log('Can\'t perform rename on %s when the show directory is missing.' - % self.show_obj.unique_name, logger.WARNING) + logger.warning(f'Can\'t perform rename on {self.show_obj.unique_name} when the show directory is missing.') return ep_obj_rename_list = [] @@ -1386,7 +1381,7 @@ class QueueItemUpdate(ShowQueueItem): logger.log('Beginning update of %s' % self.show_obj.unique_name) - logger.log('Retrieving show info from %s' % sickgear.TVInfoAPI(self.show_obj.tvid).name, logger.DEBUG) + logger.debug('Retrieving show info from %s' % sickgear.TVInfoAPI(self.show_obj.tvid).name) try: result = self.show_obj.load_from_tvinfo(cache=not self.force, tvinfo_data=self.tvinfo_data, scheduled_update=self.scheduled_update, switch=self.switch) @@ -1395,12 +1390,11 @@ class QueueItemUpdate(ShowQueueItem): elif not self.show_obj.prodid == getattr(self.tvinfo_data, 'id', None): self.tvinfo_data = result except BaseTVinfoAttributenotfound as e: - logger.log('Data retrieved from %s was incomplete, aborting: %s' % - (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Data retrieved from {sickgear.TVInfoAPI(self.show_obj.tvid).name} was incomplete,' + f' aborting: {ex(e)}') return except BaseTVinfoError as e: - logger.log('Unable to contact %s, aborting: %s' % (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e)), - logger.WARNING) + logger.warning('Unable to contact %s, aborting: %s' % (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e))) return if self.force_web: @@ -1409,22 +1403,22 @@ class QueueItemUpdate(ShowQueueItem): try: self.show_obj.save_to_db() except (BaseException, Exception) as e: - logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error saving the show to the database: %s' % ex(e)) + logger.error(traceback.format_exc()) # get episode list from DB - logger.log('Loading all episodes from the database', logger.DEBUG) + logger.debug('Loading all episodes from the database') db_ep_obj_list = self.show_obj.load_episodes_from_db(update=True) # get episode list from TVDB - logger.log('Loading all episodes from %s' % sickgear.TVInfoAPI(self.show_obj.tvid).name, logger.DEBUG) + logger.debug('Loading all episodes from %s' % sickgear.TVInfoAPI(self.show_obj.tvid).name) try: tvinfo_ep_list = self.show_obj.load_episodes_from_tvinfo(cache=not self.force, update=True, tvinfo_data=self.tvinfo_data, switch=self.switch, old_tvid=self.old_tvid, old_prodid=self.old_prodid) except BaseTVinfoException as e: - logger.log('Unable to get info from %s, the show 
info will not be refreshed: %s' % - (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to get info from {sickgear.TVInfoAPI(self.show_obj.tvid).name},' + f' the show info will not be refreshed: {ex(e)}') tvinfo_ep_list = None if None is tvinfo_ep_list: @@ -1437,7 +1431,7 @@ class QueueItemUpdate(ShowQueueItem): # for each ep we found on TVDB delete it from the DB list for cur_season in tvinfo_ep_list: for cur_episode in tvinfo_ep_list[cur_season]: - logger.log('Removing %sx%s from the DB list' % (cur_season, cur_episode), logger.DEBUG) + logger.debug('Removing %sx%s from the DB list' % (cur_season, cur_episode)) if cur_season in db_ep_obj_list and cur_episode in db_ep_obj_list[cur_season]: del db_ep_obj_list[cur_season][cur_episode] @@ -1451,15 +1445,14 @@ class QueueItemUpdate(ShowQueueItem): if self.switch: cl.append(self.show_obj.switch_ep_change_sql( self.old_tvid, self.old_prodid, cur_season, cur_episode, TVSWITCH_EP_DELETED)) - logger.log('Permanently deleting episode %sx%s from the database' % - (cur_season, cur_episode), logger.MESSAGE) + logger.log(f'Permanently deleting episode {cur_season}x{cur_episode} from the database') try: cl.extend(ep_obj.delete_episode(return_sql=True)) except exceptions_helper.EpisodeDeletedException: pass else: - logger.log('Not deleting episode %sx%s from the database because status is: %s' % - (cur_season, cur_episode, statusStrings[status]), logger.MESSAGE) + logger.log(f'Not deleting episode {cur_season}x{cur_episode} from the database' + f' because status is: {statusStrings[status]}') if cl: my_db = db.DBConnection() @@ -1606,7 +1599,7 @@ class QueueItemSwitchSource(ShowQueueItem): else: which_show = '%s:%s' % (self.old_tvid, self.old_prodid) self._set_switch_tbl_status(TVSWITCH_SAME_ID) - logger.log('Unchanged ids given, nothing to do for %s' % which_show, logger.ERROR) + logger.error('Unchanged ids given, nothing to do for %s' % which_show) return True return False @@ -1647,7 +1640,7 @@ class QueueItemSwitchSource(ShowQueueItem): which_show = '%s:%s' % (self.old_tvid, self.old_prodid) ui.notifications.message('TV info source switch: %s' % which_show, 'Error: could not find a id for show on new tv info source') - logger.log('Error: could not find a id for show on new tv info source: %s' % which_show, logger.WARNING) + logger.warning('Error: could not find a id for show on new tv info source: %s' % which_show) self._set_switch_tbl_status(TVSWITCH_NO_NEW_ID) return @@ -1662,7 +1655,7 @@ class QueueItemSwitchSource(ShowQueueItem): which_show = self.show_obj.unique_name else: which_show = '%s:%s' % (self.old_tvid, self.old_prodid) - logger.log('Duplicate shows in DB for show: %s' % which_show, logger.WARNING) + logger.warning('Duplicate shows in DB for show: %s' % which_show) ui.notifications.message('TV info source switch: %s' % which_show, 'Error: %s' % msg) self._set_switch_tbl_status(TVSWITCH_DUPLICATE_SHOW) @@ -1676,7 +1669,7 @@ class QueueItemSwitchSource(ShowQueueItem): ui.notifications.message('TV info source switch: %s' % which_show, 'Error: %s' % msg) self._set_switch_tbl_status(TVSWITCH_SOURCE_NOT_FOUND_ERROR) - logger.log('Unable to find the specified show: %s' % which_show, logger.WARNING) + logger.warning('Unable to find the specified show: %s' % which_show) return tvinfo_config = sickgear.TVInfoAPI(self.new_tvid).api_params.copy() @@ -1696,8 +1689,8 @@ class QueueItemSwitchSource(ShowQueueItem): td = t.get_show(show_id=new_prodid, actors=True, language=self.show_obj.lang) except (BaseException, 
Exception): td = None - logger.log('Failed to get new tv show id (%s) from source %s' % - (new_prodid, sickgear.TVInfoAPI(self.new_tvid).name), logger.WARNING) + logger.warning(f'Failed to get new tv show id ({new_prodid})' + f' from source {sickgear.TVInfoAPI(self.new_tvid).name}') if None is td: self._set_switch_tbl_status(TVSWITCH_NOT_FOUND_ERROR) msg = 'Show not found on new tv source' @@ -1706,7 +1699,7 @@ class QueueItemSwitchSource(ShowQueueItem): else: which_show = '%s:%s' % (self.old_tvid, self.old_prodid) ui.notifications.message('TV info source switch: %s' % which_show, 'Error: %s' % msg) - logger.log('show: %s not found on new tv source' % self.show_obj.tvid_prodid, logger.WARNING) + logger.warning('show: %s not found on new tv source' % self.show_obj.tvid_prodid) return try: @@ -1756,7 +1749,7 @@ class QueueItemSwitchSource(ShowQueueItem): msg = 'Show %s new id conflicts with existing show: %s' % \ ('[%s (%s)]' % (self.show_obj.unique_name, self.show_obj.tvid_prodid), '[%s (%s)]' % (new_show_obj.unique_name, new_show_obj.tvid_prodid)) - logger.log(msg, logger.WARNING) + logger.warning(msg) return self.progress = 'Switching to new source' self._set_switch_id(new_prodid) diff --git a/sickgear/show_updater.py b/sickgear/show_updater.py index 9d6970be..144398a7 100644 --- a/sickgear/show_updater.py +++ b/sickgear/show_updater.py @@ -72,95 +72,95 @@ class ShowUpdater(object): try: sickgear.db.backup_all_dbs(sickgear.BACKUP_DB_PATH or os.path.join(sickgear.DATA_DIR, 'backup')) except (BaseException, Exception): - logger.log('backup db error', logger.ERROR) + logger.error('backup db error') # refresh network timezones try: network_timezones.update_network_dict() except (BaseException, Exception): - logger.log('network timezone update error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('network timezone update error') + logger.error(traceback.format_exc()) # refresh webdl types try: properFinder.load_webdl_types() except (BaseException, Exception): - logger.log('error loading webdl_types', logger.DEBUG) + logger.debug('error loading webdl_types') # update xem id lists try: sickgear.scene_exceptions.get_xem_ids() except (BaseException, Exception): - logger.log('xem id list update error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('xem id list update error') + logger.error(traceback.format_exc()) # update scene exceptions try: sickgear.scene_exceptions.retrieve_exceptions() except (BaseException, Exception): - logger.log('scene exceptions update error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('scene exceptions update error') + logger.error(traceback.format_exc()) # clear the data of unused providers try: sickgear.helpers.clear_unused_providers() except (BaseException, Exception): - logger.log('unused provider cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('unused provider cleanup error') + logger.error(traceback.format_exc()) # cleanup image cache try: sickgear.helpers.cleanup_cache() except (BaseException, Exception): - logger.log('image cache cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('image cache cleanup error') + logger.error(traceback.format_exc()) # check tvinfo cache try: for i in sickgear.TVInfoAPI().all_sources: sickgear.TVInfoAPI(i).setup().check_cache() except (BaseException, Exception): - logger.log('tvinfo cache check error', logger.ERROR) - 
logger.log(traceback.format_exc(), logger.ERROR) + logger.error('tvinfo cache check error') + logger.error(traceback.format_exc()) # cleanup tvinfo cache try: for i in sickgear.TVInfoAPI().all_sources: sickgear.TVInfoAPI(i).setup().clean_cache() except (BaseException, Exception): - logger.log('tvinfo cache cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('tvinfo cache cleanup error') + logger.error(traceback.format_exc()) # cleanup ignore and require lists try: clean_ignore_require_words() except (BaseException, Exception): - logger.log('ignore, require words cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('ignore, require words cleanup error') + logger.error(traceback.format_exc()) # cleanup manual search history sickgear.search_queue.remove_old_fifo(sickgear.search_queue.MANUAL_SEARCH_HISTORY) # add missing mapped ids if not sickgear.background_mapping_task.is_alive(): - logger.log(u'Updating the TV info mappings') + logger.log('Updating the TV info mappings') import threading try: sickgear.background_mapping_task = threading.Thread( name='MAPPINGSUPDATER', target=sickgear.indexermapper.load_mapped_ids, kwargs={'update': True}) sickgear.background_mapping_task.start() except (BaseException, Exception): - logger.log('missing mapped ids update error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('missing mapped ids update error') + logger.error(traceback.format_exc()) - logger.log(u'Doing full update on all shows') + logger.log('Doing full update on all shows') # clean out cache directory, remove everything > 12 hours old try: sickgear.helpers.clear_cache() except (BaseException, Exception): - logger.log('cache dir cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('cache dir cleanup error') + logger.error(traceback.format_exc()) # select 10 'Ended' tv_shows updated more than 90 days ago # and all shows not updated more than 180 days ago to include in this update @@ -208,21 +208,21 @@ class ShowUpdater(object): cur_queue_item = sickgear.show_queue_scheduler.action.update_show( cur_show_obj, scheduled_update=True) else: - logger.debug(u'Not updating episodes for show %s because it\'s marked as ended and last/next' - u' episode is not within the grace period.' 
% cur_show_obj.unique_name) + logger.debug(f'Not updating episodes for show {cur_show_obj.unique_name} because it\'s' + f' marked as ended and last/next episode is not within the grace period.') cur_queue_item = sickgear.show_queue_scheduler.action.refresh_show(cur_show_obj, True, True) pi_list.append(cur_queue_item) except (exceptions_helper.CantUpdateException, exceptions_helper.CantRefreshException) as e: - logger.log(u'Automatic update failed: ' + ex(e), logger.ERROR) + logger.error(f'Automatic update failed: {ex(e)}') if len(pi_list): sickgear.show_queue_scheduler.action.daily_update_running = True ui.ProgressIndicators.set_indicator('dailyUpdate', ui.QueueProgressIndicator('Daily Update', pi_list)) - logger.log(u'Added all shows to show queue for full update') + logger.log('Added all shows to show queue for full update') finally: self.amActive = False diff --git a/sickgear/subtitles.py b/sickgear/subtitles.py index 2cffd798..c8cda3a0 100644 --- a/sickgear/subtitles.py +++ b/sickgear/subtitles.py @@ -124,11 +124,11 @@ class SubtitlesFinder(object): def _main(self): if 1 > len(sickgear.subtitles.get_enabled_service_list()): - logger.log(u'Not enough services selected. At least 1 service is required to' - u' search subtitles in the background', logger.ERROR) + logger.error('Not enough services selected. At least 1 service is required to' + ' search subtitles in the background') return - logger.log(u'Checking for subtitles', logger.MESSAGE) + logger.log('Checking for subtitles') # get episodes on which we want subtitles # criteria is: @@ -164,8 +164,8 @@ class SubtitlesFinder(object): for cur_result in sql_result: if not os.path.isfile(cur_result['location']): - logger.log('Episode file does not exist, cannot download subtitles for episode %dx%d of show %s' - % (cur_result['season'], cur_result['episode'], cur_result['show_name']), logger.DEBUG) + logger.debug(f'Episode file does not exist, cannot download subtitles for episode' + f' {cur_result["season"]:d}x{cur_result["episode"]:d} of show {cur_result["show_name"]}') continue # Old shows rule @@ -177,17 +177,17 @@ class SubtitlesFinder(object): (cur_result['airdate_daydiff'] <= 7 and cur_result['searchcount'] < 7 and now - datetime.datetime.strptime(cur_result['lastsearch'], '%Y-%m-%d %H:%M:%S') > datetime.timedelta(hours=rules['new'][cur_result['searchcount']]))): - logger.log('Downloading subtitles for episode %dx%d of show %s' - % (cur_result['season'], cur_result['episode'], cur_result['show_name']), logger.DEBUG) + logger.debug(f'Downloading subtitles for episode {cur_result["season"]:d}x{cur_result["episode"]:d}' + f' of show {cur_result["show_name"]}') show_obj = helpers.find_show_by_id({int(cur_result['tv_id']): int(cur_result['prod_id'])}) if not show_obj: - logger.log(u'Show not found', logger.DEBUG) + logger.debug('Show not found') return ep_obj = show_obj.get_episode(int(cur_result['season']), int(cur_result['episode'])) if isinstance(ep_obj, str): - logger.log(u'Episode not found', logger.DEBUG) + logger.debug('Episode not found') return # noinspection PyUnusedLocal @@ -197,7 +197,7 @@ class SubtitlesFinder(object): # noinspection PyUnusedLocal subtitles = ep_obj.download_subtitles() except (BaseException, Exception): - logger.log(u'Unable to find subtitles', logger.DEBUG) + logger.debug('Unable to find subtitles') return @staticmethod diff --git a/sickgear/tv.py b/sickgear/tv.py index 23641792..b9e6d5db 100644 --- a/sickgear/tv.py +++ b/sickgear/tv.py @@ -1589,8 +1589,7 @@ class TVShow(TVShowBase): self._paused = 
int(value) self.dirty = True else: - logger.log('tried to set paused property to invalid value: %s of type: %s' % (value, type(value)), - logger.ERROR) + logger.error('tried to set paused property to invalid value: %s of type: %s' % (value, type(value))) @property def ids(self): @@ -1644,7 +1643,7 @@ class TVShow(TVShowBase): def _set_location(self, new_location): # type: (AnyStr) -> None - logger.log('Setter sets location to %s' % new_location, logger.DEBUG) + logger.debug('Setter sets location to %s' % new_location) # Don't validate dir if user wants to add shows without creating a dir if sickgear.ADD_SHOWS_WO_DIR or os.path.isdir(new_location): self.dirty_setter('_location')(self, new_location) @@ -1781,8 +1780,8 @@ class TVShow(TVShowBase): if no_create: return - # logger.log('%s: An object for episode %sx%s did not exist in the cache, trying to create it' % - # (self.tvid_prodid, season, episode), logger.DEBUG) + # logger.debug('%s: An object for episode %sx%s did not exist in the cache, trying to create it' % + # (self.tvid_prodid, season, episode)) if path and not existing_only: ep_obj = TVEpisode(self, season, episode, path, show_result=ep_result) @@ -1993,8 +1992,7 @@ class TVShow(TVShowBase): # In some situations self.status = None, need to figure out where that is! if not self._status: self.status = '' - logger.log('Status missing for show: [%s] with status: [%s]' % - (self.tvid_prodid, self._status), logger.DEBUG) + logger.debug(f'Status missing for show: [{self.tvid_prodid}] with status: [{self._status}]') last_update_indexer = datetime.date.fromordinal(self._last_update_indexer) @@ -2105,16 +2103,13 @@ class TVShow(TVShowBase): for cur_row in sql_result: if (cur_row['season'], cur_row['episode']) in processed: continue - logger.log('%s: Retrieving/creating episode %sx%s' - % (self.tvid_prodid, cur_row['season'], cur_row['episode']), logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: Retrieving/creating episode {cur_row["season"]}x{cur_row["episode"]}') ep_obj = self.get_episode(cur_row['season'], cur_row['episode'], ep_result=[cur_row]) if not ep_obj.related_ep_obj: processed += [(cur_row['season'], cur_row['episode'])] else: - logger.log('%s: Found related to %sx%s episode(s)... %s' - % (self.tvid_prodid, cur_row['season'], cur_row['episode'], - ', '.join(['%sx%s' % (x.season, x.episode) for x in ep_obj.related_ep_obj])), - logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: Found related to {cur_row["season"]}x{cur_row["episode"]} episode(s)' + f'... 
{", ".join(["%sx%s" % (x.season, x.episode) for x in ep_obj.related_ep_obj])}') processed += list(set([(cur_row['season'], cur_row['episode'])] + [(x.season, x.episode) for x in ep_obj.related_ep_obj])) ep_obj.create_meta_files(force) @@ -2159,14 +2154,14 @@ class TVShow(TVShowBase): parse_result = None ep_obj = None - logger.log('%s: Creating episode from %s' % (self.tvid_prodid, cur_media_file), logger.DEBUG) + logger.debug('%s: Creating episode from %s' % (self.tvid_prodid, cur_media_file)) try: ep_obj = self.ep_obj_from_file(os.path.join(self._location, cur_media_file)) except (exceptions_helper.ShowNotFoundException, exceptions_helper.EpisodeNotFoundException) as e: - logger.log('Episode %s returned an exception: %s' % (cur_media_file, ex(e)), logger.ERROR) + logger.error('Episode %s returned an exception: %s' % (cur_media_file, ex(e))) continue except exceptions_helper.EpisodeDeletedException: - logger.log('The episode deleted itself when I tried making an object for it', logger.DEBUG) + logger.debug('The episode deleted itself when I tried making an object for it') if None is ep_obj: continue @@ -2183,9 +2178,7 @@ class TVShow(TVShowBase): pass if ep_file_name and parse_result and None is not parse_result.release_group and not ep_obj.release_name: - logger.log( - 'Name %s gave release group of %s, seems valid' % (ep_file_name, parse_result.release_group), - logger.DEBUG) + logger.debug(f'Name {ep_file_name} gave release group of {parse_result.release_group}, seems valid') ep_obj.release_name = ep_file_name # store the reference in the show @@ -2194,8 +2187,8 @@ class TVShow(TVShowBase): try: ep_obj.refresh_subtitles() except (BaseException, Exception): - logger.log('%s: Could not refresh subtitles' % self.tvid_prodid, logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('%s: Could not refresh subtitles' % self.tvid_prodid) + logger.error(traceback.format_exc()) result = ep_obj.get_sql() if None is not result: @@ -2238,8 +2231,7 @@ class TVShow(TVShowBase): try: cached_show = t.get_show(self.prodid, language=self._lang) except BaseTVinfoError as e: - logger.log('Unable to find cached seasons from %s: %s' % ( - sickgear.TVInfoAPI(self.tvid).name, ex(e)), logger.WARNING) + logger.warning(f'Unable to find cached seasons from {sickgear.TVInfoAPI(self.tvid).name}: {ex(e)}') if None is cached_show: return scanned_eps @@ -2264,14 +2256,14 @@ class TVShow(TVShowBase): try: cached_seasons[season] = cached_show[season] except BaseTVinfoSeasonnotfound as e: - logger.log('Error when trying to load the episode for [%s] from %s: %s' % - (self._name, sickgear.TVInfoAPI(self.tvid).name, ex(e)), logger.WARNING) + logger.warning(f'Error when trying to load the episode for [{self._name}]' + f' from {sickgear.TVInfoAPI(self.tvid).name}: {ex(e)}') delete_ep = True if season not in scanned_eps: scanned_eps[season] = {} - logger.log('Loading episode %sx%s for [%s] from the DB' % (season, episode, self.name), logger.DEBUG) + logger.debug('Loading episode %sx%s for [%s] from the DB' % (season, episode, self.name)) try: ep_obj = self.get_episode(season, episode, ep_result=[cur_row]) # type: TVEpisode @@ -2285,8 +2277,8 @@ class TVShow(TVShowBase): ep_obj.load_from_tvinfo(tvapi=t, update=update, cached_show=cached_show) scanned_eps[season][episode] = True except exceptions_helper.EpisodeDeletedException: - logger.log('Tried loading an episode that should have been deleted from the DB [%s], skipping it' - % self._name, logger.DEBUG) + logger.debug(f'Tried loading an episode that 
should have been deleted from the DB [{self._name}],' + f' skipping it') continue if cl: @@ -2338,9 +2330,8 @@ class TVShow(TVShowBase): t = sickgear.TVInfoAPI(self.tvid).setup(**tvinfo_config) show_obj = t.get_show(self.prodid, language=self._lang) except BaseTVinfoError: - logger.log('%s timed out, unable to update episodes for [%s] from %s' % - (sickgear.TVInfoAPI(self.tvid).name, self._name, sickgear.TVInfoAPI(self.tvid).name), - logger.ERROR) + logger.error(f'{sickgear.TVInfoAPI(self.tvid).name} timed out,' + f' unable to update episodes for [{self._name}] from {sickgear.TVInfoAPI(self.tvid).name}') return None scanned_eps = {} @@ -2374,9 +2365,8 @@ class TVShow(TVShowBase): continue with ep_obj.lock: - logger.log('%s: Loading info from %s for episode %sx%s from [%s]' % - (self.tvid_prodid, sickgear.TVInfoAPI(self.tvid).name, season, episode, self._name), - logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: Loading info from {sickgear.TVInfoAPI(self.tvid).name}' + f' for episode {season}x{episode} from [{self._name}]') ep_obj.load_from_tvinfo(season, episode, tvapi=t, update=update, cached_show=show_obj, switch=switch, old_tvid=old_tvid, old_prodid=old_prodid, switch_list=sql_l) @@ -2403,7 +2393,7 @@ class TVShow(TVShowBase): for cur_provider in itervalues(sickgear.metadata_provider_dict): # FIXME: Needs to not show this message if the option is not enabled? - logger.log('Running metadata routines for %s' % cur_provider.name, logger.DEBUG) + logger.debug('Running metadata routines for %s' % cur_provider.name) fanart_result = cur_provider.create_fanart(self) or fanart_result poster_result = cur_provider.create_poster(self) or poster_result @@ -2429,21 +2419,21 @@ class TVShow(TVShowBase): logger.log('%s: Not a real file... %s' % (self.tvid_prodid, path)) return None - logger.log('%s: Creating episode object from %s' % (self.tvid_prodid, path), logger.DEBUG) + logger.debug('%s: Creating episode object from %s' % (self.tvid_prodid, path)) try: my_parser = NameParser(show_obj=self) parse_result = my_parser.parse(path) except InvalidNameException: - logger.log('Unable to parse the filename %s into a valid episode' % path, logger.DEBUG) + logger.debug('Unable to parse the filename %s into a valid episode' % path) return None except InvalidShowException: - logger.log('Unable to parse the filename %s into a valid show' % path, logger.DEBUG) + logger.debug('Unable to parse the filename %s into a valid show' % path) return None if not len(parse_result.episode_numbers): logger.log('parse_result: %s' % parse_result) - logger.log('No episode number found in %s, ignoring it' % path, logger.ERROR) + logger.error('No episode number found in %s, ignoring it' % path) return None # for now let's assume that any episode in the show dir belongs to that show @@ -2455,8 +2445,7 @@ class TVShow(TVShowBase): for cur_ep_num in episode_numbers: cur_ep_num = int(cur_ep_num) - logger.log('%s: %s parsed to %s %sx%s' % (self.tvid_prodid, path, self._name, season_number, cur_ep_num), - logger.DEBUG) + logger.debug('%s: %s parsed to %s %sx%s' % (self.tvid_prodid, path, self._name, season_number, cur_ep_num)) check_quality_again = False same_file = False @@ -2466,7 +2455,7 @@ class TVShow(TVShowBase): try: ep_obj = self.get_episode(season_number, cur_ep_num, path) except exceptions_helper.EpisodeNotFoundException: - logger.log('%s: Unable to figure out what this file is, skipping' % self.tvid_prodid, logger.ERROR) + logger.error('%s: Unable to figure out what this file is, skipping' % self.tvid_prodid) continue else: 
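
For readers following the logging migration that runs through these hunks: calls of the form logger.log(msg, logger.DEBUG/ERROR/WARNING) become level-specific helpers (logger.debug/error/warning), and u''-prefixed %-format strings become f-strings. The snippet below is only an illustrative sketch of how such level helpers could wrap a level-argument log() function; it assumes names like MESSAGE seen in the removed lines and is not the actual sickgear.logger implementation.

# Illustrative sketch only -- not the real sickgear.logger module.
import logging

# level constants matching the names used at the old call sites
DEBUG, MESSAGE, WARNING, ERROR = logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR

def log(msg, level=MESSAGE):
    # legacy entry point: message first, level second
    logging.getLogger('sickgear').log(level, msg)

# level-specific wrappers so call sites read debug(msg) instead of log(msg, DEBUG)
def debug(msg):
    log(msg, DEBUG)

def warning(msg):
    log(msg, WARNING)

def error(msg):
    log(msg, ERROR)

# old style:  log(u'Loaded %s of %s' % (n, total), DEBUG)
# new style:  debug(f'Loaded {n} of {total}')
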
@@ -2479,8 +2468,8 @@ class TVShow(TVShowBase): if (ep_obj.location and os.path.normpath(ep_obj.location) != os.path.normpath(path)) or \ (not ep_obj.location and path) or \ (SKIPPED == status): - logger.log('The old episode had a different file associated with it, re-checking the quality ' + - 'based on the new filename %s' % path, logger.DEBUG) + logger.debug('The old episode had a different file associated with it, re-checking the quality ' + 'based on the new filename %s' % path) check_quality_again = True with ep_obj.lock: @@ -2509,8 +2498,8 @@ class TVShow(TVShowBase): new_quality = Quality.name_quality(path, self.is_anime) if Quality.UNKNOWN == new_quality: new_quality = Quality.file_quality(path) - logger.log('Since this file was renamed, file %s was checked and quality "%s" found' - % (path, Quality.qualityStrings[new_quality]), logger.DEBUG) + logger.debug(f'Since this file was renamed, file {path}' + f' was checked and quality "{Quality.qualityStrings[new_quality]}" found') status, quality = sickgear.common.Quality.split_composite_status(ep_obj.status) if Quality.UNKNOWN != new_quality or status in (SKIPPED, UNAIRED): ep_obj.status = Quality.composite_status(DOWNLOADED, new_quality) @@ -2530,18 +2519,16 @@ class TVShow(TVShowBase): # if it was snatched and now exists then set the status correctly if SNATCHED == old_status and old_quality <= new_quality: - logger.log('STATUS: this episode used to be snatched with quality %s but' - ' a file exists with quality %s so setting the status to DOWNLOADED' - % (Quality.qualityStrings[old_quality], Quality.qualityStrings[new_quality]), - logger.DEBUG) + logger.debug(f'STATUS: this episode used to be snatched with quality' + f' {Quality.qualityStrings[old_quality]} but a file exists with quality' + f' {Quality.qualityStrings[new_quality]} so setting the status to DOWNLOADED') new_status = DOWNLOADED # if it was snatched proper, and we found a higher quality one then allow the status change elif SNATCHED_PROPER == old_status and old_quality < new_quality: - logger.log('STATUS: this episode used to be snatched proper with quality %s but' - ' a file exists with quality %s so setting the status to DOWNLOADED' - % (Quality.qualityStrings[old_quality], Quality.qualityStrings[new_quality]), - logger.DEBUG) + logger.debug(f'STATUS: this episode used to be snatched proper with quality' + f' {Quality.qualityStrings[old_quality]} but a file exists with quality' + f' {Quality.qualityStrings[new_quality]} so setting the status to DOWNLOADED') new_status = DOWNLOADED elif old_status not in SNATCHED_ANY: @@ -2549,8 +2536,8 @@ class TVShow(TVShowBase): if None is not new_status: with ep_obj.lock: - logger.log('STATUS: we have an associated file, so setting the status from %s to DOWNLOADED/%s' - % (ep_obj.status, Quality.composite_status(new_status, new_quality)), logger.DEBUG) + logger.debug(f'STATUS: we have an associated file, so setting the status from {ep_obj.status}' + f' to DOWNLOADED/{Quality.composite_status(new_status, new_quality)}') ep_obj.status = Quality.composite_status(new_status, new_quality) elif same_file: @@ -2559,8 +2546,8 @@ class TVShow(TVShowBase): new_quality = Quality.name_quality(path, self.is_anime) if Quality.UNKNOWN == new_quality: new_quality = Quality.file_quality(path) - logger.log('Since this file has status: "%s", file %s was checked and quality "%s" found' - % (statusStrings[status], path, Quality.qualityStrings[new_quality]), logger.DEBUG) + logger.debug(f'Since this file has status: "{statusStrings[status]}", file 
{path}' + f' was checked and quality "{Quality.qualityStrings[new_quality]}" found') ep_obj.status = Quality.composite_status(DOWNLOADED, new_quality) with ep_obj.lock: @@ -2672,7 +2659,7 @@ class TVShow(TVShowBase): self.release_groups = self._anime and AniGroupList(self.tvid, self.prodid, self.tvid_prodid) or None - logger.log(u'Loaded.. {: <9} {: <8} {}'.format( + logger.log('Loaded.. {: <9} {: <8} {}'.format( sickgear.TVInfoAPI(self.tvid).config.get('name') + ',', '%s,' % self.prodid, self.name)) # Get IMDb_info from database @@ -2697,8 +2684,7 @@ class TVShow(TVShowBase): if 'is_mini_series' in self._imdb_info: self._imdb_info['is_mini_series'] = bool(self._imdb_info['is_mini_series']) elif sickgear.USE_IMDB_INFO: - logger.log('%s: The next show update will attempt to find IMDb info for [%s]' % - (self.tvid_prodid, self.name), logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: The next show update will attempt to find IMDb info for [{self.name}]') return self.dirty = False @@ -2799,9 +2785,9 @@ class TVShow(TVShowBase): if None is show_info or getattr(t, 'show_not_found', False): if getattr(t, 'show_not_found', False): self.inc_not_found_count() - logger.log('Show [%s] not found (maybe even removed?)' % self._name, logger.WARNING) + logger.warning('Show [%s] not found (maybe even removed?)' % self._name) else: - logger.log('Show data [%s] not found' % self._name, logger.WARNING) + logger.warning('Show data [%s] not found' % self._name) return False self.reset_not_found_count() @@ -2961,8 +2947,8 @@ class TVShow(TVShowBase): try: old_person_ids.remove(existing_person.id) except KeyError: - logger.log('%s - Person error: %s (%s)' % - (self.name, existing_person.name, existing_person.id), logger.ERROR) + logger.error(f'{self.name} -' + f' Person error: {existing_person.name} ({existing_person.id})') pass if force: existing_person.reset(src_person) @@ -3025,12 +3011,12 @@ class TVShow(TVShowBase): if not sickgear.USE_IMDB_INFO: return - logger.log('Retrieving show info [%s] from IMDb' % self._name, logger.DEBUG) + logger.debug('Retrieving show info [%s] from IMDb' % self._name) try: self._get_imdb_info() except (BaseException, Exception) as e: - logger.log('Error loading IMDb info: %s' % ex(e), logger.ERROR) - logger.log('%s' % traceback.format_exc(), logger.ERROR) + logger.error('Error loading IMDb info: %s' % ex(e)) + logger.error('%s' % traceback.format_exc()) @staticmethod def check_imdb_redirect(imdb_id): @@ -3079,7 +3065,7 @@ class TVShow(TVShowBase): imdb_info['imdb_id'] = self.imdbid i = imdbpie.Imdb(exclude_episodes=True, cachedir=os.path.join(sickgear.CACHE_DIR, 'imdb-pie')) if not helpers.parse_imdb_id(imdb_id): - logger.log('Not a valid imdbid: %s for show: %s' % (imdb_id, self._name), logger.WARNING) + logger.warning('Not a valid imdbid: %s for show: %s' % (imdb_id, self._name)) return imdb_ratings = i.get_title_ratings(imdb_id=imdb_id) imdb_akas = i.get_title_versions(imdb_id=imdb_id) @@ -3087,8 +3073,8 @@ class TVShow(TVShowBase): ipie = getattr(imdbpie.__dict__.get('imdbpie'), '_SIMPLE_GET_ENDPOINTS', None) if ipie: ipie.update({ - u'get_title_certificates': u'/title/{imdb_id}/certificates', - u'get_title_parentalguide': u'/title/{imdb_id}/parentalguide', + 'get_title_certificates': '/title/{imdb_id}/certificates', + 'get_title_parentalguide': '/title/{imdb_id}/parentalguide', }) imdb_certificates = i.get_title_certificates(imdb_id=imdb_id) except LookupError as e: @@ -3099,17 +3085,17 @@ class TVShow(TVShowBase): indexermapper.map_indexers_to_show(self, force=True) if not 
retry and imdb_id != 'tt%07d' % self.ids[indexermapper.TVINFO_IMDB]['id']: # add retry arg to prevent endless loops - logger.log('imdbid: %s not found. retrying with newly found id: %s' % - (imdb_id, 'tt%07d' % self.ids[indexermapper.TVINFO_IMDB]['id']), logger.DEBUG) + logger.debug(f'imdbid: {imdb_id} not found. retrying with newly found id:' + f' {"tt%07d" % self.ids[indexermapper.TVINFO_IMDB]["id"]}') self._get_imdb_info(retry=True) return - logger.log('imdbid: %s not found. Error: %s' % (imdb_id, ex(e)), logger.WARNING) + logger.warning('imdbid: %s not found. Error: %s' % (imdb_id, ex(e))) return except ImdbAPIError as e: - logger.log('Imdb API Error: %s' % ex(e), logger.WARNING) + logger.warning('Imdb API Error: %s' % ex(e)) return except (BaseException, Exception) as e: - logger.log('Error: %s retrieving imdb id: %s' % (ex(e), imdb_id), logger.WARNING) + logger.warning('Error: %s retrieving imdb id: %s' % (ex(e), imdb_id)) return # ratings @@ -3180,19 +3166,19 @@ class TVShow(TVShowBase): imdb_info['certificates'] = '|'.join([cert for cert in itervalues(certs_head) if cert] + sorted(certs_tail)) if (not imdb_info['certificates'] and isinstance(imdb_tv.get('certificate'), dict) and isinstance(imdb_tv.get('certificate').get('certificate'), string_types)): - imdb_info['certificates'] = '%s:%s' % (u'US', imdb_tv.get('certificate').get('certificate')) + imdb_info['certificates'] = f'US:{imdb_tv.get("certificate").get("certificate")}' imdb_info['last_update'] = datetime.date.today().toordinal() # Rename dict keys without spaces for DB upsert self.imdb_info = dict( [(k.replace(' ', '_'), k(v) if hasattr(v, 'keys') else v) for k, v in iteritems(imdb_info)]) - logger.log('%s: Obtained info from IMDb -> %s' % (self.tvid_prodid, self._imdb_info), logger.DEBUG) + logger.debug('%s: Obtained info from IMDb -> %s' % (self.tvid_prodid, self._imdb_info)) logger.log('%s: Parsed latest IMDb show info for [%s]' % (self.tvid_prodid, self._name)) def next_episode(self): - logger.log('%s: Finding the episode which airs next for: %s' % (self.tvid_prodid, self._name), logger.DEBUG) + logger.debug('%s: Finding the episode which airs next for: %s' % (self.tvid_prodid, self._name)) cur_date = datetime.date.today().toordinal() if not self.nextaired or self.nextaired and cur_date > self.nextaired: @@ -3208,11 +3194,10 @@ class TVShow(TVShowBase): """, [self.tvid, self.prodid, datetime.date.today().toordinal(), UNAIRED, WANTED, FAILED]) if None is sql_result or 0 == len(sql_result): - logger.log('%s: No episode found... need to implement a show status' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: No episode found... 
need to implement a show status' % self.tvid_prodid) self.nextaired = '' else: - logger.log('%s: Found episode %sx%s' % ( - self.tvid_prodid, sql_result[0]['season'], sql_result[0]['episode']), logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: Found episode {sql_result[0]["season"]}x{sql_result[0]["episode"]}') self.nextaired = sql_result[0]['airdate'] return self.nextaired @@ -3298,20 +3283,20 @@ class TVShow(TVShowBase): file_attribute = os.stat(self.location)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable - logger.log('Attempting to make writeable the read only folder %s' % self._location, logger.DEBUG) + logger.debug('Attempting to make writeable the read only folder %s' % self._location) try: os.chmod(self.location, stat.S_IWRITE) except (BaseException, Exception): - logger.log('Unable to change permissions of %s' % self._location, logger.WARNING) + logger.warning('Unable to change permissions of %s' % self._location) result = helpers.remove_file(self.location, tree=True) if result: logger.log('%s show folder %s' % (result, self._location)) except exceptions_helper.ShowDirNotFoundException: - logger.log('Show folder does not exist, no need to %s %s' % (action, self._location), logger.WARNING) + logger.warning('Show folder does not exist, no need to %s %s' % (action, self._location)) except OSError as e: - logger.log('Unable to %s %s: %s / %s' % (action, self._location, repr(e), ex(e)), logger.WARNING) + logger.warning('Unable to %s %s: %s / %s' % (action, self._location, repr(e), ex(e))) def populate_cache(self, force=False): # type: (bool) -> None @@ -3359,8 +3344,8 @@ class TVShow(TVShowBase): try: ep_obj = self.get_episode(season, episode, ep_result=[cur_row]) except exceptions_helper.EpisodeDeletedException: - logger.log('The episode from [%s] was deleted while we were refreshing it, moving on to the next one' - % self._name, logger.DEBUG) + logger.debug(f'The episode from [{self._name}] was deleted while we were refreshing it,' + f' moving on to the next one') continue # if the path exist and if it's in our show dir @@ -3371,10 +3356,9 @@ class TVShow(TVShowBase): # locations repeat but attempt to delete once attempted += ep_obj.location if kept >= self.prune: - result = helpers.remove_file(ep_obj.location, prefix_failure=u'%s: ' % self.tvid_prodid) + result = helpers.remove_file(ep_obj.location, prefix_failure=f'{self.tvid_prodid}: ') if result: - logger.log(u'%s: %s file %s' % (self.tvid_prodid, result, ep_obj.location), - logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: {result} file {ep_obj.location}') deleted += 1 else: kept += 1 @@ -3394,10 +3378,9 @@ class TVShow(TVShowBase): else: ep_obj.status = (sickgear.SKIP_REMOVED_FILES, IGNORED)[ not sickgear.SKIP_REMOVED_FILES] - logger.log( - '%s: File no longer at location for s%02de%02d,' % (self.tvid_prodid, season, episode) - + ' episode removed and status changed to %s' % statusStrings[ep_obj.status], - logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: File no longer at location for' + f' s{season:02d}e{episode:02d}, episode removed' + f' and status changed to {statusStrings[ep_obj.status]}') ep_obj.subtitles = list() ep_obj.subtitles_searchcount = 0 ep_obj.subtitles_lastsearch = str(datetime.datetime.min) @@ -3431,9 +3414,9 @@ class TVShow(TVShowBase): """ # TODO: Add support for force option if not os.path.isdir(self._location): - logger.log('%s: Show directory doesn\'t exist, can\'t download subtitles' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: Show directory doesn\'t 
exist, can\'t download subtitles' % self.tvid_prodid) return - logger.log('%s: Downloading subtitles' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: Downloading subtitles' % self.tvid_prodid) try: my_db = db.DBConnection() @@ -3449,7 +3432,7 @@ class TVShow(TVShowBase): ep_obj = self.ep_obj_from_file(cur_row['location']) _ = ep_obj.download_subtitles(force=force) except (BaseException, Exception): - logger.log('Error occurred when downloading subtitles: %s' % traceback.format_exc(), logger.ERROR) + logger.error('Error occurred when downloading subtitles: %s' % traceback.format_exc()) return def remove_character_images(self): @@ -3535,8 +3518,7 @@ class TVShow(TVShowBase): try: os.rename(old_dir, new_dir) except (BaseException, Exception) as e: - logger.log('Unable to rename %s to %s: %s / %s' % (old_dir, new_dir, repr(e), ex(e)), - logger.WARNING) + logger.warning('Unable to rename %s to %s: %s / %s' % (old_dir, new_dir, repr(e), ex(e))) old_id = TVidProdid({old_tvid: old_prodid})() rating = sickgear.FANART_RATINGS.get(old_id) @@ -3563,7 +3545,7 @@ class TVShow(TVShowBase): self, force=True, web=True, priority=QueuePriorities.VERYHIGH, pausestatus_after=pausestatus_after, switch_src=True) except exceptions_helper.CantUpdateException as e: - logger.log('Unable to update this show. %s' % ex(e), logger.ERROR) + logger.error('Unable to update this show. %s' % ex(e)) def save_to_db(self, force_save=False): # type: (bool) -> None @@ -3572,10 +3554,10 @@ class TVShow(TVShowBase): :param force_save: """ if not self.dirty and not force_save: - logger.log('%s: Not saving show to db - record is not dirty' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: Not saving show to db - record is not dirty' % self.tvid_prodid) return - logger.log('%s: Saving show info to database' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: Saving show info to database' % self.tvid_prodid) new_value_dict = dict( air_by_date=self._air_by_date, @@ -3672,8 +3654,8 @@ class TVShow(TVShowBase): :param multi_ep: multiple episodes :return: """ - logger.log('Checking if found %sepisode %sx%s is wanted at quality %s' % - (('', 'multi-part ')[multi_ep], season, episode, Quality.qualityStrings[quality]), logger.DEBUG) + logger.debug(f'Checking if found {("", "multi-part ")[multi_ep]}episode {season}x{episode}' + f' is wanted at quality {Quality.qualityStrings[quality]}') if not multi_ep: try: @@ -3682,19 +3664,19 @@ class TVShow(TVShowBase): if quality in wq: cur_status, cur_quality = Quality.split_composite_status(self.sxe_ep_obj[season][episode].status) if cur_status in (WANTED, UNAIRED, SKIPPED, FAILED): - logger.log('Existing episode status is wanted/unaired/skipped/failed,' - ' getting found episode', logger.DEBUG) + logger.debug('Existing episode status is wanted/unaired/skipped/failed,' + ' getting found episode') return True elif manual_search: - logger.log('Usually ignoring found episode, but forced search allows the quality,' - ' getting found episode', logger.DEBUG) + logger.debug('Usually ignoring found episode, but forced search allows the quality,' + ' getting found episode') return True elif quality > cur_quality: - logger.log( - 'Episode already exists but the found episode has better quality,' - ' getting found episode', logger.DEBUG) + logger.debug('Episode already exists but the found episode has better quality,' + ' getting found episode') return True - logger.log('None of the conditions were met, ignoring found episode', logger.DEBUG) + logger.debug('None of the conditions were met,' + ' 
ignoring found episode') return False except (BaseException, Exception): pass @@ -3707,10 +3689,11 @@ class TVShow(TVShowBase): if 0 < len(archive_qualities): initial = '+ upgrade to %s + (%s)'\ % (initial, ','.join([Quality.qualityStrings[qual] for qual in archive_qualities])) - logger.log('Want initial %s and found %s' % (initial, Quality.qualityStrings[quality]), logger.DEBUG) + logger.debug('Want initial %s and found %s' % (initial, Quality.qualityStrings[quality])) if quality not in all_qualities: - logger.log('Don\'t want this quality, ignoring found episode', logger.DEBUG) + logger.debug('Don\'t want this quality,' + ' ignoring found episode') return False my_db = db.DBConnection() @@ -3722,34 +3705,33 @@ class TVShow(TVShowBase): """, [self.tvid, self.prodid, season, episode]) if not sql_result or not len(sql_result): - logger.log('Unable to find a matching episode in database, ignoring found episode', logger.DEBUG) + logger.debug('Unable to find a matching episode in database,' + ' ignoring found episode') return False cur_status, cur_quality = Quality.split_composite_status(int(sql_result[0]['status'])) ep_status_text = statusStrings[cur_status] - logger.log('Existing episode status: %s (%s)' % (statusStrings[cur_status], ep_status_text), logger.DEBUG) + logger.debug('Existing episode status: %s (%s)' % (statusStrings[cur_status], ep_status_text)) # if we know we don't want it then just say no if cur_status in [IGNORED, ARCHIVED] + ([SKIPPED], [])[multi_ep] and not manual_search: - logger.log('Existing episode status is %signored/archived, ignoring found episode' % - ('skipped/', '')[multi_ep], logger.DEBUG) + logger.debug(f'Existing episode status is {("skipped/", "")[multi_ep]}ignored/archived,' + f' ignoring found episode') return False # if it's one of these then we want it as long as it's in our allowed initial qualities if quality in all_qualities: if cur_status in [WANTED, UNAIRED, SKIPPED, FAILED] + ([], SNATCHED_ANY)[multi_ep]: - logger.log('Existing episode status is wanted/unaired/skipped/failed, getting found episode', - logger.DEBUG) + logger.debug('Existing episode status is wanted/unaired/skipped/failed,' + ' getting found episode') return True elif manual_search: - logger.log( - 'Usually ignoring found episode, but forced search allows the quality, getting found episode', - logger.DEBUG) + logger.debug('Usually ignoring found episode, but forced search allows the quality,' + ' getting found episode') return True else: - logger.log('Quality is on wanted list, need to check if it\'s better than existing quality', - logger.DEBUG) + logger.debug('Quality is on wanted list, need to check if it\'s better than existing quality') downloaded_status_list = SNATCHED_ANY + [DOWNLOADED] # special case: already downloaded quality is not in any of the wanted Qualities @@ -3760,14 +3742,14 @@ class TVShow(TVShowBase): # if re-downloading then only keep items in the archiveQualities list and better than what we have if cur_status in downloaded_status_list and quality in wanted_qualities and quality > cur_quality: - logger.log('Episode already exists but the found episode has better quality, getting found episode', - logger.DEBUG) + logger.debug('Episode already exists but the found episode has better quality,' + ' getting found episode') return True else: - logger.log('Episode already exists and the found episode has same/lower quality, ignoring found episode', - logger.DEBUG) + logger.debug('Episode already exists and the found episode has same/lower quality,' + ' ignoring found 
episode') - logger.log('None of the conditions were met, ignoring found episode', logger.DEBUG) + logger.debug('None of the conditions were met, ignoring found episode') return False def get_overview(self, ep_status, split_snatch=False): @@ -3942,7 +3924,7 @@ class TVEpisode(TVEpisodeBase): def _set_location(self, val): log_vals = (('clears', ''), ('sets', ' to ' + val))[any(val)] # noinspection PyStringFormat - logger.log(u'Setter %s location%s' % log_vals, logger.DEBUG) + logger.debug('Setter %s location%s' % log_vals) # self._location = newLocation self.dirty_setter('_location')(self, val) @@ -3972,11 +3954,10 @@ class TVEpisode(TVEpisodeBase): # TODO: Add support for force option if not os.path.isfile(self.location): - logger.log('%s: Episode file doesn\'t exist, can\'t download subtitles for episode %sx%s' % - (self.show_obj.tvid_prodid, self.season, self.episode), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}: Episode file doesn\'t exist,' + f' can\'t download subtitles for episode {self.season}x{self.episode}') return - logger.log('%s: Downloading subtitles for episode %sx%s' - % (self.show_obj.tvid_prodid, self.season, self.episode), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}: Downloading subtitles for episode {self.season}x{self.episode}') previous_subtitles = self.subtitles @@ -3993,7 +3974,7 @@ class TVEpisode(TVEpisodeBase): subs_new_path = os.path.join(os.path.dirname(video.path), sickgear.SUBTITLES_DIR) dir_exists = helpers.make_dir(subs_new_path) if not dir_exists: - logger.log('Unable to create subtitles folder %s' % subs_new_path, logger.ERROR) + logger.error('Unable to create subtitles folder %s' % subs_new_path) else: helpers.chmod_as_parent(subs_new_path) @@ -4007,7 +3988,7 @@ class TVEpisode(TVEpisodeBase): helpers.chmod_as_parent(subtitle.path) except (BaseException, Exception): - logger.log('Error occurred when downloading subtitles: %s' % traceback.format_exc(), logger.ERROR) + logger.error('Error occurred when downloading subtitles: %s' % traceback.format_exc()) return self.refresh_subtitles() @@ -4022,17 +4003,17 @@ class TVEpisode(TVEpisodeBase): try: subtitle_list = ", ".join([subliminal.language.Language(x).name for x in newsubtitles]) except (BaseException, Exception): - logger.log('Could not parse a language to use to fetch subtitles for episode %sx%s' % - (self.season, self.episode), logger.DEBUG) + logger.debug(f'Could not parse a language to use to fetch subtitles' + f' for episode {self.season}x{self.episode}') return - logger.log('%s: Downloaded %s subtitles for episode %sx%s' % - (self.show_obj.tvid_prodid, subtitle_list, self.season, self.episode), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}: Downloaded {subtitle_list} subtitles' + f' for episode {self.season}x{self.episode}') notifiers.notify_subtitle_download(self, subtitle_list) else: - logger.log('%s: No subtitles downloaded for episode %sx%s' - % (self.show_obj.tvid_prodid, self.season, self.episode), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}: No subtitles downloaded' + f' for episode {self.season}x{self.episode}') if sickgear.SUBTITLES_HISTORY: for video in subs: @@ -4092,8 +4073,8 @@ class TVEpisode(TVEpisodeBase): try: self.load_from_nfo(self.location) except exceptions_helper.NoNFOException: - logger.log('%s: There was an error loading the NFO for episode %sx%s' % - (self.show_obj.tvid_prodid, season, episode), logger.ERROR) + logger.error(f'{self.show_obj.tvid_prodid}: There was an error loading the NFO' + f' for episode 
{season}x{episode}') pass # if we tried loading it from NFO and didn't find the NFO, try the Indexers @@ -4118,8 +4099,7 @@ class TVEpisode(TVEpisodeBase): :param episode: episode number :param show_result: """ - logger.log('%s: Loading episode details from DB for episode %sx%s' - % (self._show_obj.tvid_prodid, season, episode), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}: Loading episode details from DB for episode {season}x{episode}') show_result = show_result and next(iter(show_result), None) if not show_result or episode != show_result['episode'] or season != show_result['season']: @@ -4136,8 +4116,8 @@ class TVEpisode(TVEpisodeBase): if len(sql_result): raise exceptions_helper.MultipleDBEpisodesException('DB has multiple records for the same show') - logger.log('%s: Episode %sx%s not found in the database' - % (self._show_obj.tvid_prodid, self._season, self._episode), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}: Episode {self._season}x{self._episode}' + f' not found in the database') return False show_result = next(iter(sql_result)) @@ -4259,9 +4239,8 @@ class TVEpisode(TVEpisodeBase): if None is episode: episode = self._episode - logger.log('%s: Loading episode details from %s for episode %sx%s' % - (self._show_obj.tvid_prodid, sickgear.TVInfoAPI(self._show_obj.tvid).name, season, episode), - logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}: Loading episode details from' + f' {sickgear.TVInfoAPI(self._show_obj.tvid).name} for episode {season}x{episode}') try: if cached_show: @@ -4290,35 +4269,34 @@ class TVEpisode(TVEpisodeBase): ep_info = cached_season[episode] # type: TVInfoEpisode except (BaseTVinfoEpisodenotfound, BaseTVinfoSeasonnotfound): - logger.log('Unable to find the episode on %s... has it been removed? Should I delete from db?' % - sickgear.TVInfoAPI(self.tvid).name, logger.DEBUG) - # if I'm no longer on the Indexers, but I once was then delete myself from the DB + logger.debug(f'Unable to find the episode on {sickgear.TVInfoAPI(self.tvid).name}...' + f' has it been removed? 
Should it be deleted from the db?') + # if no longer on the Indexers, but once was, then delete it from the DB if -1 != self._epid and helpers.should_delete_episode(self._status): self.delete_episode() elif UNKNOWN == self._status: self.status = SKIPPED return except (BaseTVinfoError, IOError) as e: - logger.log('%s threw up an error: %s' % (sickgear.TVInfoAPI(self.tvid).name, ex(e)), logger.DEBUG) + logger.debug('%s threw up an error: %s' % (sickgear.TVInfoAPI(self.tvid).name, ex(e))) # if the episode is already valid just log it, if not throw it up if UNKNOWN == self._status: self.status = SKIPPED if self._name: - logger.log('%s timed out but there is enough info from other sources, allowing the error' % - sickgear.TVInfoAPI(self.tvid).name, logger.DEBUG) + logger.debug(f'{sickgear.TVInfoAPI(self.tvid).name}' + f' timed out but there is enough info from other sources, allowing the error') return - logger.log('%s timed out, unable to create the episode' % sickgear.TVInfoAPI(self.tvid).name, - logger.ERROR) + logger.error('%s timed out, unable to create the episode' % sickgear.TVInfoAPI(self.tvid).name) return False if getattr(ep_info, 'absolute_number', None) in (None, ''): logger.debug('This episode (%s - %sx%s) has no absolute number on %s' % (self.show_obj.unique_name, season, episode, sickgear.TVInfoAPI(self.tvid).name)) else: - logger.log('%s: The absolute_number for %sx%s is : %s' % - (self._show_obj.tvid_prodid, season, episode, ep_info['absolute_number']), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}:' + f' The absolute_number for {season}x{episode} is : {ep_info["absolute_number"]}') self.absolute_number = int(ep_info['absolute_number']) if switch and None is not switch_list: @@ -4407,7 +4385,7 @@ class TVEpisode(TVEpisodeBase): # early conversion to int so that episode doesn't get marked dirty self.epid = getattr(ep_info, 'id', None) if None is self._epid: - logger.log('Failed to retrieve ID from %s' % sickgear.TVInfoAPI(self.tvid).name, logger.ERROR) + logger.error('Failed to retrieve ID from %s' % sickgear.TVInfoAPI(self.tvid).name) if helpers.should_delete_episode(self._status): self.delete_episode() elif UNKNOWN == self._status: @@ -4427,9 +4405,8 @@ class TVEpisode(TVEpisodeBase): return if self._location: - logger.log('%s: Setting status for %sx%s based on status %s and existence of %s' % - (self._show_obj.tvid_prodid, season, episode, statusStrings[self._status], self._location), - logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}: Setting status for {season}x{episode}' + f' based on status {statusStrings[self._status]} and existence of {self._location}') # if we don't have the file if not os.path.isfile(self._location): @@ -4474,24 +4451,24 @@ class TVEpisode(TVEpisodeBase): else: msg = 'Not touching episode status %s, because there is no file' - logger.log(msg % statusStrings[self._status], logger.DEBUG) + logger.debug(msg % statusStrings[self._status]) # if we have a media file then it's downloaded elif sickgear.helpers.has_media_ext(self._location): if IGNORED == self._status: - logger.log('File exists for %sx%s, ignoring because of status %s' % - (self._season, self._episode, statusStrings[self._status]), logger.DEBUG) + logger.debug(f'File exists for {self._season}x{self._episode},' + f' ignoring because of status {statusStrings[self._status]}') # leave propers alone, you have to either post-process them or manually change them back elif self._status not in Quality.SNATCHED_ANY + Quality.DOWNLOADED + Quality.ARCHIVED: msg = '(1) Status 
changes from %s to ' % statusStrings[self._status] self.status = Quality.status_from_name_or_file(self._location, anime=self._show_obj.is_anime) - logger.log('%s%s' % (msg, statusStrings[self._status]), logger.DEBUG) + logger.debug('%s%s' % (msg, statusStrings[self._status])) # shouldn't get here probably else: msg = '(2) Status changes from %s to ' % statusStrings[self._status] self.status = UNKNOWN - logger.log('%s%s' % (msg, statusStrings[self._status]), logger.DEBUG) + logger.debug('%s%s' % (msg, statusStrings[self._status])) def load_from_nfo(self, location): """ @@ -4505,8 +4482,8 @@ class TVEpisode(TVEpisodeBase): % self._show_obj.tvid_prodid) return - logger.log('%s: Loading episode details from the NFO file associated with %s' - % (self.show_obj.tvid_prodid, location), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}' + f': Loading episode details from the NFO file associated with {location}') self.location = location @@ -4514,24 +4491,22 @@ class TVEpisode(TVEpisodeBase): if UNKNOWN == self._status and sickgear.helpers.has_media_ext(self.location): status_quality = Quality.status_from_name_or_file(self.location, anime=self._show_obj.is_anime) - logger.log('(3) Status changes from %s to %s' % (self._status, status_quality), logger.DEBUG) + logger.debug('(3) Status changes from %s to %s' % (self._status, status_quality)) self.status = status_quality nfo_file = sickgear.helpers.replace_extension(self.location, 'nfo') - logger.log('%s: Using NFO name %s' % (self._show_obj.tvid_prodid, nfo_file), logger.DEBUG) + logger.debug('%s: Using NFO name %s' % (self._show_obj.tvid_prodid, nfo_file)) if os.path.isfile(nfo_file): try: show_xml = etree.ElementTree(file=nfo_file) except (SyntaxError, ValueError) as e: - logger.log('Error loading the NFO, backing up the NFO and skipping for now: %s' % ex(e), - logger.ERROR) # TODO: figure out what's wrong and fix it + # TODO: figure out what's wrong and fix it + logger.error('Error loading the NFO, backing up the NFO and skipping for now: %s' % ex(e)) try: os.rename(nfo_file, '%s.old' % nfo_file) except (BaseException, Exception) as e: - logger.log( - 'Failed to rename your episode\'s NFO file - you need to delete it or fix it: %s' % ex(e), - logger.ERROR) + logger.error(f'Failed to rename episode\'s NFO file - you need to delete it or fix it: {ex(e)}') raise exceptions_helper.NoNFOException('Error in NFO format') # TODO: deprecated function getiterator needs to be replaced @@ -4540,10 +4515,10 @@ class TVEpisode(TVEpisodeBase): if None is epDetails.findtext('season') or int(epDetails.findtext('season')) != self._season or \ None is epDetails.findtext('episode') or int( epDetails.findtext('episode')) != self._episode: - logger.log('%s: NFO has an block for a different episode - wanted %sx%s' - ' but got %sx%s' % - (self._show_obj.tvid_prodid, self._season, self._episode, - epDetails.findtext('season'), epDetails.findtext('episode')), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}' + f': NFO has an block for a different episode - wanted' + f' {self._season}x{self._episode}' + f' but got {epDetails.findtext("season")}x{epDetails.findtext("episode")}') continue if None is epDetails.findtext('title') or None is epDetails.findtext('aired'): @@ -4664,11 +4639,11 @@ class TVEpisode(TVEpisodeBase): # remove myself from the show dictionary if self.show_obj.get_episode(self._season, self._episode, no_create=True) == self: - logger.log('Removing myself from my show\'s list', logger.DEBUG) + logger.debug('Removing myself from my show\'s 
list') del self.show_obj.sxe_ep_obj[self._season][self._episode] # delete myself from the DB - logger.log('Deleting myself from the database', logger.DEBUG) + logger.debug('Deleting myself from the database') sql = [['DELETE FROM tv_episodes WHERE indexer = ? AND showid = ? AND season = ? AND episode = ?', [self._show_obj.tvid, self._show_obj.prodid, self._season, self._episode]]] @@ -4690,7 +4665,7 @@ class TVEpisode(TVEpisodeBase): """ if not self.dirty and not force_save: - logger.log('%s: Not creating SQL queue - record is not dirty' % self._show_obj.tvid_prodid, logger.DEBUG) + logger.debug('%s: Not creating SQL queue - record is not dirty' % self._show_obj.tvid_prodid) return self.dirty = False @@ -4750,12 +4725,12 @@ class TVEpisode(TVEpisodeBase): """ if not self.dirty and not force_save: - logger.log('%s: Not saving episode to db - record is not dirty' % self._show_obj.tvid_prodid, logger.DEBUG) + logger.debug('%s: Not saving episode to db - record is not dirty' % self._show_obj.tvid_prodid) return - logger.log('%s: Saving episode details to database' % self._show_obj.tvid_prodid, logger.DEBUG) + logger.debug('%s: Saving episode details to database' % self._show_obj.tvid_prodid) - logger.log('STATUS IS %s' % statusStrings[self._status], logger.DEBUG) + logger.debug('STATUS IS %s' % statusStrings[self._status]) new_value_dict = dict( absolute_number=self._absolute_number, @@ -4908,7 +4883,7 @@ class TVEpisode(TVEpisodeBase): np = NameParser(name, show_obj=show_obj, naming_pattern=True) parse_result = np.parse(name) except (InvalidNameException, InvalidShowException) as e: - logger.log('Unable to get parse release_group: %s' % ex(e), logger.DEBUG) + logger.debug('Unable to get parse release_group: %s' % ex(e)) return '' if not parse_result.release_group: @@ -5005,7 +4980,7 @@ class TVEpisode(TVEpisodeBase): result_name = result_name.replace('%RG', 'SickGear') result_name = result_name.replace('%rg', 'SickGear') - logger.log('Episode has no release name, replacing it with a generic one: %s' % result_name, logger.DEBUG) + logger.debug('Episode has no release name, replacing it with a generic one: %s' % result_name) if not replace_map['%RT']: result_name = re.sub('([ _.-]*)%RT([ _.-]*)', r'\2', result_name) @@ -5113,14 +5088,14 @@ class TVEpisode(TVEpisodeBase): # fill out the template for this piece and then insert this piece into the actual pattern cur_name_group_result = re.sub('(?i)(?x)' + regex_used, regex_replacement, cur_name_group) # cur_name_group_result = cur_name_group.replace(ep_format, ep_string) - # logger.log(u"found "+ep_format+" as the ep pattern using "+regex_used+" + # logger.debug("found "+ep_format+" as the ep pattern using "+regex_used+" # and replaced it with "+regex_replacement+" to result in "+cur_name_group_result+" - # from "+cur_name_group, logger.DEBUG) + # from "+cur_name_group) result_name = result_name.replace(cur_name_group, cur_name_group_result) result_name = self._format_string(result_name, replace_map) - logger.log('formatting pattern: %s -> %s' % (pattern, result_name), logger.DEBUG) + logger.debug('formatting pattern: %s -> %s' % (pattern, result_name)) return result_name @@ -5196,7 +5171,7 @@ class TVEpisode(TVEpisodeBase): """ if not os.path.isfile(self.location): - logger.log('Can\'t perform rename on %s when it doesn\'t exist, skipping' % self.location, logger.WARNING) + logger.warning('Can\'t perform rename on %s when it doesn\'t exist, skipping' % self.location) return proper_path = self.proper_path() @@ -5211,13 +5186,11 @@ class 
TVEpisode(TVEpisodeBase): if absolute_current_path_no_ext.startswith(self._show_obj.location): current_path = absolute_current_path_no_ext[len(self._show_obj.location):] - logger.log('Renaming/moving episode from the base path %s to %s' % (self.location, absolute_proper_path), - logger.DEBUG) + logger.debug('Renaming/moving episode from the base path %s to %s' % (self.location, absolute_proper_path)) # if it's already named correctly then don't do anything if proper_path == current_path: - logger.log('%s: File %s is already named correctly, skipping' % (self._epid, self.location), - logger.DEBUG) + logger.debug('%s: File %s is already named correctly, skipping' % (self._epid, self.location)) return related_files = postProcessor.PostProcessor(self.location).list_associated_files( @@ -5228,7 +5201,7 @@ class TVEpisode(TVEpisodeBase): subtitles_only=True) # absolute_proper_subs_path = os.path.join(sickgear.SUBTITLES_DIR, self.formatted_filename()) - logger.log('Files associated to %s: %s' % (self.location, related_files), logger.DEBUG) + logger.debug('Files associated to %s: %s' % (self.location, related_files)) # move the ep file result = helpers.rename_ep_file(self.location, absolute_proper_path, absolute_current_path_no_ext_length) @@ -5238,14 +5211,14 @@ class TVEpisode(TVEpisodeBase): renamed = helpers.rename_ep_file(cur_related_file, absolute_proper_path, absolute_current_path_no_ext_length) if not renamed: - logger.log('%s: Unable to rename file %s' % (self._epid, cur_related_file), logger.ERROR) + logger.error('%s: Unable to rename file %s' % (self._epid, cur_related_file)) for cur_related_sub in related_subs: absolute_proper_subs_path = os.path.join(sickgear.SUBTITLES_DIR, self.formatted_filename()) renamed = helpers.rename_ep_file(cur_related_sub, absolute_proper_subs_path, absolute_current_path_no_ext_length) if not renamed: - logger.log('%s: Unable to rename file %s' % (self._epid, cur_related_sub), logger.ERROR) + logger.error('%s: Unable to rename file %s' % (self._epid, cur_related_sub)) # save the ep with self.lock: @@ -5278,8 +5251,9 @@ class TVEpisode(TVEpisodeBase): """ has_timestamp = isinstance(self._timestamp, int) and 0 != self._timestamp if not has_timestamp and (not isinstance(self._airdate, datetime.date) or 1 == self._airdate.year): - logger.log('%s: Did not change modify date of %s because episode date is never aired or invalid' - % (self._show_obj.tvid_prodid, os.path.basename(self.location)), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}' + f': Did not change modify date of {os.path.basename(self.location)}' + f' because episode date is never aired or invalid') return aired_dt = None diff --git a/sickgear/tv_base.py b/sickgear/tv_base.py index d00dc284..0d4c8f45 100644 --- a/sickgear/tv_base.py +++ b/sickgear/tv_base.py @@ -42,8 +42,8 @@ class TVBase(object): setattr(self, attr_name, val) self.dirty = True else: - logger.log('Didn\'t change property "%s" because expected: %s, but got: %s with value: %s' % - (attr_name, types, type(val), val), logger.WARNING) + logger.warning(f'Didn\'t change property "{attr_name}" because expected: {types},' + f' but got: {type(val)} with value: {val}') return wrapper diff --git a/sickgear/tvcache.py b/sickgear/tvcache.py index cdcb4b8a..3bab265e 100644 --- a/sickgear/tvcache.py +++ b/sickgear/tvcache.py @@ -90,7 +90,7 @@ class TVCache(object): try: self.check_auth() except AuthException as e: - logger.log(u'Authentication error: ' + ex(e), logger.ERROR) + logger.error(f'Authentication error: {ex(e)}') return [] if 
self.should_update(): @@ -130,7 +130,7 @@ class TVCache(object): :return: :rtype: AnyStr """ - return u'' + title.replace(' ', '.') + return f'{title.replace(" ", ".")}' @staticmethod def _translate_link_url(url): @@ -159,8 +159,7 @@ class TVCache(object): return self.add_cache_entry(title, url) - logger.log('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name, - logger.DEBUG) + logger.debug('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name) def _get_last_update(self): """ @@ -276,7 +275,7 @@ class TVCache(object): parser = NameParser(show_obj=show_obj, convert=True, indexer_lookup=False) parse_result = parser.parse(name) except InvalidNameException: - logger.log('Unable to parse the filename %s into a valid episode' % name, logger.DEBUG) + logger.debug('Unable to parse the filename %s into a valid episode' % name) return except InvalidShowException: return @@ -312,7 +311,7 @@ class TVCache(object): # get version version = parse_result.version - logger.log('Add to cache: [%s]' % name, logger.DEBUG) + logger.debug('Add to cache: [%s]' % name) return [ 'INSERT OR IGNORE INTO provider_cache' @@ -406,7 +405,7 @@ class TVCache(object): # skip if provider is anime only and show is not anime if self.provider.anime_only and not show_obj.is_anime: - logger.debug(u'%s is not an anime, skipping' % show_obj.unique_name) + logger.debug(f'{show_obj.unique_name} is not an anime, skipping') continue # get season and ep data (ignoring multi-eps for now) @@ -424,8 +423,8 @@ class TVCache(object): # if the show says we want that episode then add it to the list if not show_obj.want_episode(season, ep_obj_list, quality, manual_search): - logger.log(u'Skipping ' + cur_result['name'] + ' because we don\'t want an episode that\'s ' + - Quality.qualityStrings[quality], logger.DEBUG) + logger.debug(f"Skipping {cur_result['name']}" + f" because we don't want an episode that's {Quality.qualityStrings[quality]}") continue ep_obj = show_obj.get_episode(season, ep_obj_list) @@ -434,7 +433,7 @@ class TVCache(object): title = cur_result['name'] url = cur_result['url'] - logger.log(u'Found result ' + title + ' at ' + url) + logger.log(f'Found result {title} at {url}') result = self.provider.get_result([ep_obj], url) if None is result: diff --git a/sickgear/version_checker.py b/sickgear/version_checker.py index 7da64b0e..aee6ccd1 100644 --- a/sickgear/version_checker.py +++ b/sickgear/version_checker.py @@ -277,17 +277,17 @@ class GitUpdateManager(UpdateManager): def _find_working_git(self): - logger.debug(u'Checking if git commands are available') + logger.debug('Checking if git commands are available') main_git = (sickgear.GIT_PATH, 'git')[not sickgear.GIT_PATH] _, _, exit_status = self._git_version(main_git) if 0 == exit_status: - logger.debug(u'Using: %s' % main_git) + logger.debug(f'Using: {main_git}') return main_git - logger.debug(u'Git not found: %s' % main_git) + logger.debug(f'Git not found: {main_git}') # trying alternatives @@ -301,12 +301,12 @@ class GitUpdateManager(UpdateManager): if main_git != main_git.lower(): alt_git_paths.append(main_git.lower()) if sickgear.GIT_PATH: - logger.debug(u'git.exe is missing, remove `git_path` from config.ini: %s' % main_git) + logger.debug(f'git.exe is missing, remove `git_path` from config.ini: {main_git}') if re.search(r' \(x86\)', main_git): alt_git_paths.append(re.sub(r' \(x86\)', '', main_git)) else: alt_git_paths.append(re.sub('Program Files', 'Program Files (x86)', main_git)) - 
logger.debug(u'Until `git_path` is removed by a config.ini edit, trying: %s' % alt_git_paths[-1]) + logger.debug(f'Until `git_path` is removed by a config.ini edit, trying: {alt_git_paths[-1]}') if alt_git_paths: logger.debug('Trying known alternative git locations') @@ -315,9 +315,9 @@ class GitUpdateManager(UpdateManager): _, _, exit_status = self._git_version(cur_git_path) if 0 == exit_status: - logger.debug(u'Using: %s' % cur_git_path) + logger.debug(f'Using: {cur_git_path}') return cur_git_path - logger.debug(u'Not using: %s' % cur_git_path) + logger.debug(f'Not using: {cur_git_path}') # Still haven't found a working git error_message = 'Unable to find your git executable - Shutdown SickGear and EITHER set git_path' \ @@ -337,15 +337,15 @@ class GitUpdateManager(UpdateManager): git_path = self._git_path if not git_path: - logger.error(u'No git specified, cannot use git commands') + logger.error('No git specified, cannot use git commands') return output, err, exit_status cmd = ' '.join([git_path] + arg_list) try: - logger.debug(u'Executing %s with your shell in %s' % (cmd, sickgear.PROG_DIR)) + logger.debug(f'Executing {cmd} with your shell in {sickgear.PROG_DIR}') output, err, exit_status = cmdline_runner([git_path] + arg_list, env={'LANG': 'en_US.UTF-8'}) - logger.debug(u'git output: %s' % output) + logger.debug(f'git output: {output}') except OSError: logger.log('Failed command: %s' % cmd) @@ -354,12 +354,12 @@ class GitUpdateManager(UpdateManager): logger.log('Failed command: %s, %s' % (cmd, ex(e))) if 0 == exit_status: - logger.debug(u'Successful return: %s' % cmd) + logger.debug(f'Successful return: {cmd}') exit_status = 0 self.unsafe = False elif 1 == exit_status: - logger.error(u'Failed: %s returned: %s' % (cmd, output)) + logger.error(f'Failed: {cmd} returned: {output}') elif 128 == exit_status or 'fatal:' in output or err: if 'unsafe repository' not in output and 'fatal:' in output: @@ -382,14 +382,14 @@ class GitUpdateManager(UpdateManager): except (BaseException, Exception): pass exit_status = 128 - msg = u'Fatal: %s returned: %s' % (cmd, output) + msg = f'Fatal: {cmd} returned: {output}' if 'develop' in output.lower() or 'main' in output.lower(): logger.error(msg) else: logger.debug(msg) else: - logger.error(u'Treat as error for now, command: %s returned: %s' % (cmd, output)) + logger.error(f'Treat as error for now, command: {cmd} returned: {output}') return output, err, exit_status @@ -405,7 +405,7 @@ class GitUpdateManager(UpdateManager): if 0 == exit_status and output: cur_commit_hash = output.strip() if not re.match(r'^[a-z0-9]+$', cur_commit_hash): - logger.error(u'Output doesn\'t look like a hash, not using it') + logger.error("Output doesn't look like a hash, not using it") return False self._cur_commit_hash = cur_commit_hash sickgear.CUR_COMMIT_HASH = str(cur_commit_hash) @@ -434,7 +434,7 @@ class GitUpdateManager(UpdateManager): _, _, exit_status = self._run_git(['fetch', '%s' % sickgear.GIT_REMOTE]) if 0 != exit_status: - logger.error(u'Unable to contact github, can\'t check for update') + logger.error("Unable to contact github, can't check for update") return if not self._cur_pr_number: @@ -446,14 +446,14 @@ class GitUpdateManager(UpdateManager): cur_commit_hash = output.strip() if not re.match('^[a-z0-9]+$', cur_commit_hash): - logger.debug(u'Output doesn\'t look like a hash, not using it') + logger.debug("Output doesn't look like a hash, not using it") return self._newest_commit_hash = cur_commit_hash self._old_commit_hash = cur_commit_hash self._old_branch = 
self._find_installed_branch() else: - logger.debug(u'git didn\'t return newest commit hash') + logger.debug("git didn't return newest commit hash") return # get number of commits behind and ahead (option --count not supported git < 1.7.2) @@ -466,11 +466,13 @@ class GitUpdateManager(UpdateManager): self._num_commits_ahead = int(output.count('>')) except (BaseException, Exception): - logger.debug(u'git didn\'t return numbers for behind and ahead, not using it') + logger.debug("git didn't return numbers for behind and ahead, not using it") return - logger.debug(u'cur_commit = %s, newest_commit = %s, num_commits_behind = %s, num_commits_ahead = %s' % ( - self._cur_commit_hash, self._newest_commit_hash, self._num_commits_behind, self._num_commits_ahead)) + logger.debug(f'cur_commit = {self._cur_commit_hash}' + f', newest_commit = {self._newest_commit_hash}' + f', num_commits_behind = {self._num_commits_behind}' + f', num_commits_ahead = {self._num_commits_ahead}') else: # we need to treat pull requests specially as it doesn't seem possible to set their "@{upstream}" tag output, _, _ = self._run_git(['ls-remote', '%s' % sickgear.GIT_REMOTE, @@ -512,7 +514,7 @@ class GitUpdateManager(UpdateManager): installed_branch = self._find_installed_branch() if self.branch != installed_branch: - logger.debug(u'Branch checkout: %s->%s' % (installed_branch, self.branch)) + logger.debug(f'Branch checkout: {installed_branch}->{self.branch}') return True self._find_installed_version() @@ -524,7 +526,7 @@ class GitUpdateManager(UpdateManager): try: self._check_github_for_update() except (BaseException, Exception) as e: - logger.error(u'Unable to contact github, can\'t check for update: %r' % e) + logger.error(f"Unable to contact github, can't check for update: {e!r}") return False if 0 < self._num_commits_behind: @@ -661,12 +663,12 @@ class SourceUpdateManager(UpdateManager): try: self._check_github_for_update() except (BaseException, Exception) as e: - logger.error(u'Unable to contact github, can\'t check for update: %r' % e) + logger.error(f"Unable to contact github, can't check for update: {e!r}") return False installed_branch = self._find_installed_branch() if self.branch != installed_branch: - logger.debug(u'Branch checkout: %s->%s' % (installed_branch, self.branch)) + logger.debug(f'Branch checkout: {installed_branch}->{self.branch}') return True if not self._cur_commit_hash or 0 < self._num_commits_behind: @@ -712,8 +714,9 @@ class SourceUpdateManager(UpdateManager): # when _cur_commit_hash doesn't match anything _num_commits_behind == 100 self._num_commits_behind += 1 - logger.debug(u'cur_commit = %s, newest_commit = %s, num_commits_behind = %s' - % (self._cur_commit_hash, self._newest_commit_hash, self._num_commits_behind)) + logger.debug(f'cur_commit = {self._cur_commit_hash}' + f', newest_commit = {self._newest_commit_hash}' + f', num_commits_behind = {self._num_commits_behind}') def set_newest_text(self): @@ -721,7 +724,7 @@ class SourceUpdateManager(UpdateManager): newest_text = None if not self._cur_commit_hash: - logger.debug(u'Unknown current version number, don\'t know if we should update or not') + logger.debug("Unknown current version number, don't know if we should update or not") newest_text = 'Unknown current version number: If you\'ve never used the SickGear upgrade system' \ ' before then current version is not set. 
— Update Now' \ @@ -751,48 +754,48 @@ class SourceUpdateManager(UpdateManager): try: # prepare the update dir - sg_update_dir = os.path.join(sickgear.PROG_DIR, u'sg-update') + sg_update_dir = os.path.join(sickgear.PROG_DIR, 'sg-update') if os.path.isdir(sg_update_dir): - logger.log(u'Clearing out update folder %s before extracting' % sg_update_dir) + logger.log(f'Clearing out update folder {sg_update_dir} before extracting') shutil.rmtree(sg_update_dir) - logger.log(u'Creating update folder %s before extracting' % sg_update_dir) + logger.log(f'Creating update folder {sg_update_dir} before extracting') os.makedirs(sg_update_dir) # retrieve file - logger.log(u'Downloading update from %r' % tar_download_url) - tar_download_path = os.path.join(sg_update_dir, u'sg-update.tar') + logger.log(f'Downloading update from {tar_download_url!r}') + tar_download_path = os.path.join(sg_update_dir, 'sg-update.tar') urllib.request.urlretrieve(tar_download_url, tar_download_path) if not os.path.isfile(tar_download_path): - logger.error(u'Unable to retrieve new version from %s, can\'t update' % tar_download_url) + logger.error(f"Unable to retrieve new version from {tar_download_url}, can't update") return False if not tarfile.is_tarfile(tar_download_path): - logger.error(u'Retrieved version from %s is corrupt, can\'t update' % tar_download_url) + logger.error(f"Retrieved version from {tar_download_url} is corrupt, can't update") return False # extract to sg-update dir - logger.log(u'Extracting file %s' % tar_download_path) + logger.log(f'Extracting file {tar_download_path}') tar = tarfile.open(tar_download_path) tar.extractall(sg_update_dir) tar.close() # delete .tar.gz - logger.log(u'Deleting file %s' % tar_download_path) + logger.log(f'Deleting file {tar_download_path}') os.remove(tar_download_path) # find update dir name update_dir_contents = [x for x in os.listdir(sg_update_dir) if os.path.isdir(os.path.join(sg_update_dir, x))] if 1 != len(update_dir_contents): - logger.error(u'Invalid update data, update failed: %s' % update_dir_contents) + logger.error(f'Invalid update data, update failed: {update_dir_contents}') return False content_dir = os.path.join(sg_update_dir, update_dir_contents[0]) # walk temp folder and move files to main folder - logger.log(u'Moving files from %s to %s' % (content_dir, sickgear.PROG_DIR)) + logger.log(f'Moving files from {content_dir} to {sickgear.PROG_DIR}') for dirname, dirnames, filenames in os.walk(content_dir): dirname = dirname[len(content_dir) + 1:] for curfile in filenames: @@ -808,7 +811,7 @@ class SourceUpdateManager(UpdateManager): os.remove(new_path) os.renames(old_path, new_path) except (BaseException, Exception) as e: - logger.debug(u'Unable to update %s: %s' % (new_path, ex(e))) + logger.debug(f'Unable to update {new_path}: {ex(e)}') os.remove(old_path) # Trash the updated file without moving in new path continue @@ -820,8 +823,8 @@ class SourceUpdateManager(UpdateManager): sickgear.CUR_COMMIT_BRANCH = self.branch except (BaseException, Exception) as e: - logger.error(u'Error while trying to update: %s' % ex(e)) - logger.debug(u'Traceback: %s' % traceback.format_exc()) + logger.error(f'Error while trying to update: {ex(e)}') + logger.debug(f'Traceback: {traceback.format_exc()}') return False # Notify update successful diff --git a/sickgear/watchedstate_queue.py b/sickgear/watchedstate_queue.py index 81c8d614..c7449506 100644 --- a/sickgear/watchedstate_queue.py +++ b/sickgear/watchedstate_queue.py @@ -56,7 +56,7 @@ class 
WatchedStateQueue(generic_queue.GenericQueue): # plex watched state item generic_queue.GenericQueue.add_item(self, item) else: - logger.log(u'Not adding item, it\'s already in the queue', logger.DEBUG) + logger.debug("Not adding item, it's already in the queue") class EmbyWatchedStateQueueItem(generic_queue.QueueItem): diff --git a/sickgear/webapi.py b/sickgear/webapi.py index b91e1625..f75c426a 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -253,7 +253,7 @@ class Api(webserve.BaseHandler): result = function(*ag) return result except Exception as e: - logger.log(ex(e), logger.ERROR) + logger.error(ex(e)) raise e def _out_as_json(self, dict): @@ -277,17 +277,17 @@ class Api(webserve.BaseHandler): self.apikey_name = '' if not sickgear.USE_API: - msg = u'%s - SB API Disabled. ACCESS DENIED' % remoteIp + msg = f'{remoteIp} - SB API Disabled. ACCESS DENIED' return False, msg, args, kwargs if not apiKey: - msg = u'%s - gave NO API KEY. ACCESS DENIED' % remoteIp + msg = f'{remoteIp} - gave NO API KEY. ACCESS DENIED' return False, msg, args, kwargs for realKey in realKeys: if apiKey == realKey[1]: self.apikey_name = realKey[0] - msg = u'%s - gave correct API KEY: %s. ACCESS GRANTED' % (remoteIp, realKey[0]) + msg = f'{remoteIp} - gave correct API KEY: {realKey[0]}. ACCESS GRANTED' return True, msg, args, kwargs - msg = u'%s - gave WRONG API KEY %s. ACCESS DENIED' % (remoteIp, apiKey) + msg = f'{remoteIp} - gave WRONG API KEY {apiKey}. ACCESS DENIED' return False, msg, args, kwargs @@ -306,10 +306,10 @@ def call_dispatcher(handler, args, kwargs): cmds = kwargs["cmd"] del kwargs["cmd"] - api_log(handler, u"cmd: '" + str(cmds) + "'", logger.DEBUG) - api_log(handler, u"all args: '" + str(args) + "'", logger.DEBUG) - api_log(handler, u"all kwargs: '" + str(kwargs) + "'", logger.DEBUG) - # logger.log(u"dateFormat: '" + str(dateFormat) + "'", logger.DEBUG) + api_log(handler, f'cmd: "{cmds}"', logger.DEBUG) + api_log(handler, f'all args: "{args}"', logger.DEBUG) + api_log(handler, f'all kwargs: "{kwargs}"', logger.DEBUG) + # logger.debug(f'dateFormat: "{dateFormat}"') outDict = {} @@ -626,14 +626,11 @@ class ApiCall(object): elif "ignore" == type: pass else: - self.log(u"Invalid param type set " + str(type) + " can not check or convert ignoring it", - logger.ERROR) + self.log(f"Invalid param type set {type} can not check or convert ignoring it", logger.ERROR) if error: # this is a real ApiError !! 
- raise ApiError( - u"param: '" + str(name) + "' with given value: '" + str(value) + "' could not be parsed into '" + str( - type) + "'") + raise ApiError(f'param: "{name}" with given value: "{value}" could not be parsed into "{type}"') return value @@ -654,8 +651,7 @@ class ApiCall(object): if error: # this is kinda a ApiError but raising an error is the only way of quitting here - raise ApiError(u"param: '" + str(name) + "' with given value: '" + str( - value) + "' is out of allowed range '" + str(allowedValues) + "'") + raise ApiError(f'param: "{name}" with given value: "{value}" is out of allowed range "{allowedValues}"') class TVDBShorthandWrapper(ApiCall): @@ -1369,8 +1365,8 @@ class CMD_SickGearEpisodeSetStatus(ApiCall): backlog_queue_item = search_queue.BacklogQueueItem(show_obj, segment) sickgear.search_queue_scheduler.action.add_item(backlog_queue_item) - self.log(u'Starting backlog for %s season %s because some episodes were set to WANTED' % - (show_obj.unique_name, season)) + self.log(f'Starting backlog for {show_obj.unique_name} season {season}' + f' because some episodes were set to WANTED') extra_msg = " Backlog started" @@ -3336,7 +3332,7 @@ class CMD_SickGearShowAddExisting(ApiCall): try: myShow = t[int(self.prodid), False] except BaseTVinfoError as e: - self.log(u"Unable to find show with id " + str(self.tvid), logger.WARNING) + self.log(f'Unable to find show with id {self.tvid}', logger.WARNING) return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer") indexerName = None @@ -3499,7 +3495,7 @@ class CMD_SickGearShowAddNew(ApiCall): try: myShow = t[int(self.prodid), False] except BaseTVinfoError as e: - self.log(u"Unable to find show with id " + str(self.tvid), logger.WARNING) + self.log(f'Unable to find show with id {self.tvid}', logger.WARNING) return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer") indexerName = None @@ -3520,11 +3516,11 @@ class CMD_SickGearShowAddNew(ApiCall): # don't create show dir if config says not to if sickgear.ADD_SHOWS_WO_DIR: - self.log(u"Skipping initial creation of " + showPath + " due to config.ini setting") + self.log(f'Skipping initial creation of {showPath} due to config.ini setting') else: dir_exists = helpers.make_dir(showPath) if not dir_exists: - self.log(u"Unable to create the folder " + showPath + ", can't add the show", logger.ERROR) + self.log(f"Unable to create the folder {showPath}, can't add the show", logger.ERROR) return _responds(RESULT_FAILURE, {"path": showPath}, "Unable to create the folder " + showPath + ", can't add the show") else: @@ -4440,7 +4436,7 @@ class CMD_SickGearShowUpdate(ApiCall): sickgear.show_queue_scheduler.action.update_show(show_obj, True) return _responds(RESULT_SUCCESS, msg='%s has queued to be updated' % show_obj.unique_name) except exceptions_helper.CantUpdateException as e: - self.log(u'Unable to update %s. %s' % (show_obj.unique_name, ex(e)), logger.ERROR) + self.log(f'Unable to update {show_obj.unique_name}. {ex(e)}', logger.ERROR) return _responds(RESULT_FAILURE, msg='Unable to update %s. 
%s' % (show_obj.unique_name, ex(e))) diff --git a/sickgear/webserve.py b/sickgear/webserve.py index 5b83341e..1a97fe8e 100644 --- a/sickgear/webserve.py +++ b/sickgear/webserve.py @@ -167,12 +167,12 @@ class BaseStaticFileHandler(StaticFileHandler): body = '\nRequest body: %s' % decode_str(self.request.body) except (BaseException, Exception): pass - logger.log('Sent %s error response to a `%s` request for `%s` with headers:\n%s%s' % - (status_code, self.request.method, self.request.path, self.request.headers, body), logger.WARNING) + logger.warning(f'Sent {status_code} error response to a `{self.request.method}`' + f' request for `{self.request.path}` with headers:\n' + f'{self.request.headers}{body}') # suppress traceback by removing 'exc_info' kwarg if 'exc_info' in kwargs: - logger.log('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"]), - logger.DEBUG) + logger.debug('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"])) del kwargs['exc_info'] return super(BaseStaticFileHandler, self).write_error(status_code, **kwargs) @@ -228,12 +228,11 @@ class RouteHandler(RequestHandler): body = '\nRequest body: %s' % decode_str(self.request.body) except (BaseException, Exception): pass - logger.log('Sent %s error response to a `%s` request for `%s` with headers:\n%s%s' % - (status_code, self.request.method, self.request.path, self.request.headers, body), logger.WARNING) + logger.warning(f'Sent {status_code} error response to a `{self.request.method}`' + f' request for `{self.request.path}` with headers:\n{self.request.headers}{body}') # suppress traceback by removing 'exc_info' kwarg if 'exc_info' in kwargs: - logger.log('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"]), - logger.DEBUG) + logger.debug('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"])) del kwargs['exc_info'] return super(RouteHandler, self).write_error(status_code, **kwargs) @@ -432,7 +431,7 @@ class CalendarHandler(BaseHandler): Works with iCloud, Google Calendar and Outlook. 
Provides a subscribeable URL for iCal subscriptions """ - logger.log(u'Receiving iCal request from %s' % self.request.remote_ip) + logger.log(f'Receiving iCal request from {self.request.remote_ip}') # Limit dates past_date = (datetime.date.today() + datetime.timedelta(weeks=-52)).toordinal() @@ -472,21 +471,17 @@ class CalendarHandler(BaseHandler): minutes=helpers.try_int(show['runtime'], 60)) # Create event for episode - ical += 'BEGIN:VEVENT%s' % crlf \ - + 'DTSTART:%sT%sZ%s' % (air_date_time.strftime('%Y%m%d'), - air_date_time.strftime('%H%M%S'), crlf) \ - + 'DTEND:%sT%sZ%s' % (air_date_time_end.strftime('%Y%m%d'), - air_date_time_end.strftime('%H%M%S'), crlf) \ - + u'SUMMARY:%s - %sx%s - %s%s' % (show['show_name'], episode['season'], episode['episode'], - episode['name'], crlf) \ - + u'UID:%s-%s-%s-E%sS%s%s' % (appname, datetime.date.today().isoformat(), - show['show_name'].replace(' ', '-'), - episode['episode'], episode['season'], crlf) \ - + u'DESCRIPTION:%s on %s' % ((show['airs'] or '(Unknown airs)'), - (show['network'] or 'Unknown network')) \ - + ('' if not episode['description'] - else u'%s%s' % (nl, episode['description'].splitlines()[0])) \ - + '%sEND:VEVENT%s' % (crlf, crlf) + desc = '' if not episode['description'] else f'{nl}{episode["description"].splitlines()[0]}' + ical += (f'BEGIN:VEVENT{crlf}' + f'DTSTART:{air_date_time.strftime("%Y%m%d")}T{air_date_time.strftime("%H%M%S")}Z{crlf}' + f'DTEND:{air_date_time_end.strftime("%Y%m%d")}T{air_date_time_end.strftime("%H%M%S")}Z{crlf}' + f'SUMMARY:{show["show_name"]} - {episode["season"]}x{episode["episode"]}' + f' - {episode["name"]}{crlf}' + f'UID:{appname}-{datetime.date.today().isoformat()}-{show["show_name"].replace(" ", "-")}' + f'-E{episode["episode"]}S{episode["season"]}{crlf}' + f'DESCRIPTION:{(show["airs"] or "(Unknown airs)")} on {(show["network"] or "Unknown network")}' + f'{desc}{crlf}' + f'END:VEVENT{crlf}') # Ending the iCal return ical + 'END:VCALENDAR' @@ -499,7 +494,7 @@ class RepoHandler(BaseStaticFileHandler): kodi_is_legacy = None def parse_url_path(self, url_path): - logger.log('Kodi req... get(path): %s' % url_path, logger.DEBUG) + logger.debug('Kodi req... get(path): %s' % url_path) return super(RepoHandler, self).parse_url_path(url_path) def set_extra_headers(self, *args, **kwargs): @@ -514,7 +509,7 @@ class RepoHandler(BaseStaticFileHandler): super(RepoHandler, self).initialize(*args, **kwargs) - logger.log('Kodi req... initialize(path): %s' % kwargs['path'], logger.DEBUG) + logger.debug('Kodi req... initialize(path): %s' % kwargs['path']) cache_client = os.path.join(sickgear.CACHE_DIR, 'clients') cache_client_kodi = os.path.join(cache_client, 'kodi') cache_client_kodi_watchedstate = os.path.join(cache_client_kodi, 'service.sickgear.watchedstate.updater') @@ -583,7 +578,7 @@ class RepoHandler(BaseStaticFileHandler): # Force a UNIX line ending, like the md5sum utility. 
with io.open(os.path.join(zip_path, '%s.md5' % zip_name), 'w', newline='\n') as zh: - zh.write(u'%s *%s\n' % (self.md5ify(zip_data), zip_name)) + zh.write(f'{self.md5ify(zip_data)} *{zip_name}\n') aid, ver = self.repo_sickgear_details() save_zip(aid, ver, os.path.join(cache_client_kodi, 'repository.sickgear'), @@ -739,7 +734,7 @@ class RepoHandler(BaseStaticFileHandler): def md5ify(string): if not isinstance(string, binary_type): string = string.encode('utf-8') - return u'%s' % hashlib.new('md5', string).hexdigest() + return f'{hashlib.new("md5", string).hexdigest()}' def kodi_repository_sickgear_zip(self): bfr = io.BytesIO() @@ -753,7 +748,7 @@ class RepoHandler(BaseStaticFileHandler): infile = fh.read() zh.writestr('repository.sickgear/icon.png', infile, zipfile.ZIP_DEFLATED) except OSError as e: - logger.log('Unable to zip: %r / %s' % (e, ex(e)), logger.WARNING) + logger.warning('Unable to zip: %r / %s' % (e, ex(e))) zip_data = bfr.getvalue() bfr.close() @@ -792,7 +787,7 @@ class RepoHandler(BaseStaticFileHandler): zh.writestr(os.path.relpath(direntry.path.replace(self.kodi_legacy, ''), basepath), infile, zipfile.ZIP_DEFLATED) except OSError as e: - logger.log('Unable to zip %s: %r / %s' % (direntry.path, e, ex(e)), logger.WARNING) + logger.warning('Unable to zip %s: %r / %s' % (direntry.path, e, ex(e))) zip_data = bfr.getvalue() bfr.close() @@ -1466,7 +1461,7 @@ r.close() if not bname: msg = 'Missing media file name provided' data[k] = msg - logger.log('Update watched state skipped an item: %s' % msg, logger.WARNING) + logger.warning('Update watched state skipped an item: %s' % msg) continue if bname in ep_results: @@ -1494,7 +1489,7 @@ r.close() if as_json: if not data: data = dict(error='Request made to SickGear with invalid payload') - logger.log('Update watched state failed: %s' % data['error'], logger.WARNING) + logger.warning('Update watched state failed: %s' % data['error']) return json_dumps(data) @@ -1628,13 +1623,13 @@ class Home(MainHandler): images_path = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', 'network') for cur_show_obj in sickgear.showList: network_name = 'nonetwork' if None is cur_show_obj.network \ - else cur_show_obj.network.replace(u'\u00C9', 'e').lower() + else cur_show_obj.network.replace('\u00C9', 'e').lower() if network_name not in networks: - filename = u'%s.png' % network_name + filename = f'{network_name}.png' if not os.path.isfile(os.path.join(images_path, filename)): - filename = u'%s.png' % re.sub(r'(?m)(.*)\s+\(\w{2}\)$', r'\1', network_name) + filename = '%s.png' % re.sub(r'(?m)(.*)\s+\(\w{2}\)$', r'\1', network_name) if not os.path.isfile(os.path.join(images_path, filename)): - filename = u'nonetwork.png' + filename = 'nonetwork.png' networks.setdefault(network_name, filename) t.network_images.setdefault(cur_show_obj.tvid_prodid, networks[network_name]) @@ -1690,10 +1685,10 @@ class Home(MainHandler): authed, auth_msg = sab.test_authentication(host, username, password, apikey) if authed: - return u'Success. Connected %s authentication' % \ - ('using %s' % access_msg, 'with no')['None' == auth_msg.lower()] - return u'Authentication failed. %s' % auth_msg - return u'Unable to connect to host' + return f'Success. Connected' \ + f' {(f"using {access_msg}", "with no")["None" == auth_msg.lower()]} authentication' + return f'Authentication failed. 
{auth_msg}' + return 'Unable to connect to host' def test_nzbget(self, host=None, use_https=None, username=None, password=None): self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') @@ -2022,10 +2017,10 @@ class Home(MainHandler): def check_update(self): # force a check to see if there is a new version if sickgear.update_software_scheduler.action.check_for_new_version(force=True): - logger.log(u'Forced version check found results') + logger.log('Forced version check found results') if sickgear.update_packages_scheduler.action.check_for_new_version(force=True): - logger.log(u'Forced package version check found results') + logger.log('Forced package version check found results') self.redirect('/home/') @@ -2591,7 +2586,7 @@ class Home(MainHandler): new_prodid=m_prodid, force_id=True, set_pause=set_pause, mark_wanted=mark_wanted) except (BaseException, Exception) as e: - logger.log('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e)), logger.WARNING) + logger.warning('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e))) ui.notifications.message('TV info source switch', 'Queued switch of tv info source') return {'Success': 'Switched to new TV info source'} @@ -2658,12 +2653,12 @@ class Home(MainHandler): else: msg = 'Main ID unchanged, because show from %s with ID: %s exists in DB.' % \ (sickgear.TVInfoAPI(m_tvid).name, mtvid_prodid) - logger.log(msg, logger.WARNING) + logger.warning(msg) ui.notifications.message(*[s.strip() for s in msg.split(',')]) except MultipleShowObjectsException: msg = 'Main ID unchanged, because show from %s with ID: %s exists in DB.' % \ (sickgear.TVInfoAPI(m_tvid).name, m_prodid) - logger.log(msg, logger.WARNING) + logger.warning(msg) ui.notifications.message(*[s.strip() for s in msg.split(',')]) response.update({ @@ -2943,9 +2938,9 @@ class Home(MainHandler): old_path = os.path.normpath(show_obj._location) new_path = os.path.normpath(location) if old_path != new_path: - logger.log(u'%s != %s' % (old_path, new_path), logger.DEBUG) + logger.debug(f'{old_path} != {new_path}') if not os.path.isdir(new_path) and not sickgear.CREATE_MISSING_SHOW_DIRS: - errors.append(u'New location %s does not exist' % new_path) + errors.append(f'New location {new_path} does not exist') # don't bother if we're going to update anyway elif not do_update: @@ -2960,9 +2955,8 @@ class Home(MainHandler): # show_obj.load_episodes_from_tvinfo() # rescan the episodes in the new folder except exceptions_helper.NoNFOException: - errors.append( - u"The folder at %s doesn't contain a tvshow.nfo - " - u"copy your files to that folder before you change the directory in SickGear." 
% new_path) + errors.append(f'The folder at {new_path} doesn"t contain a tvshow.nfo -' + f' copy your files to that folder before you change the directory in SickGear.') # save it to the DB show_obj.save_to_db() @@ -3175,7 +3169,7 @@ class Home(MainHandler): sql_l = [] for cur_ep in eps.split('|'): - logger.log(u'Attempting to set status on episode %s to %s' % (cur_ep, status), logger.DEBUG) + logger.debug(f'Attempting to set status on episode {cur_ep} to {status}') ep_obj = show_obj.get_episode(*tuple([int(x) for x in cur_ep.split('x')])) @@ -3205,7 +3199,7 @@ class Home(MainHandler): err_msg = 'to downloaded because it\'s not snatched/downloaded/archived' if err_msg: - logger.log('Refusing to change status of %s %s' % (cur_ep, err_msg), logger.ERROR) + logger.error('Refusing to change status of %s %s' % (cur_ep, err_msg)) continue if ARCHIVED == status: @@ -3239,31 +3233,31 @@ class Home(MainHandler): if season not in season_wanted: season_wanted += [season] - season_list += u'
<li>Season %s</li>' % season
-                        logger.log((u'Not adding wanted eps to backlog search for %s season %s because show is paused',
-                                    u'Starting backlog search for %s season %s because eps were set to wanted')[
+                        season_list += f'<li>Season {season}</li>'
+                        logger.log(('Not adding wanted eps to backlog search for %s season %s because show is paused',
+                                    'Starting backlog search for %s season %s because eps were set to wanted')[
                                        not show_obj.paused] % (show_obj.unique_name, season))
-                (title, msg) = (('Not starting backlog', u'Paused show prevented backlog search'),
-                                ('Backlog started', u'Backlog search started'))[not show_obj.paused]
+                (title, msg) = (('Not starting backlog', 'Paused show prevented backlog search'),
+                                ('Backlog started', 'Backlog search started'))[not show_obj.paused]
                 if segments:
                     ui.notifications.message(title,
-                                             u'%s for the following seasons of %s:<br /><ul>%s</ul>'
-                                             % (msg, show_obj.unique_name, season_list))
+                                             f'{msg} for the following seasons of {show_obj.unique_name}:<br />'
+                                             f'<ul>{season_list}</ul>')
                 else:
                     ui.notifications.message('Not starting backlog', 'No provider has active searching enabled')
             elif FAILED == status:
-                msg = u'Retrying search automatically for the following season of %s:<br /><ul>' % show_obj.unique_name
+                msg = f'Retrying search automatically for the following season of {show_obj.unique_name}:<br /><ul>'
                 for season, segment in iteritems(segments):  # type: int, List[sickgear.tv.TVEpisode]
                     cur_failed_queue_item = search_queue.FailedQueueItem(show_obj, segment)
                     sickgear.search_queue_scheduler.action.add_item(cur_failed_queue_item)
                     msg += '<li>Season %s</li>' % season
-                    logger.log(u'Retrying search for %s season %s because some eps were set to failed' %
-                               (show_obj.unique_name, season))
+                    logger.log(f'Retrying search for {show_obj.unique_name} season {season}'
+                               f' because some eps were set to failed')
                 msg += '</ul>
      ' @@ -3359,7 +3353,7 @@ class Home(MainHandler): tvid_prodid_obj.list + [ep_info[0], ep_info[1]]) if not sql_result: - logger.log(u'Unable to find an episode for ' + cur_ep + ', skipping', logger.WARNING) + logger.warning(f'Unable to find an episode for {cur_ep}, skipping') continue related_ep_result = my_db.select('SELECT * FROM tv_episodes WHERE location = ? AND episode != ?', [sql_result[0]['location'], ep_info[1]]) @@ -3895,8 +3889,8 @@ class HomeProcessMedia(Home): skip_failure_processing = nzbget_call and not nzbget_dupekey if nzbget_call and sickgear.NZBGET_SCRIPT_VERSION != kwargs.get('pp_version', '0'): - logger.log('Calling SickGear-NG.py script %s is not current version %s, please update.' % - (kwargs.get('pp_version', '0'), sickgear.NZBGET_SCRIPT_VERSION), logger.ERROR) + logger.error(f'Calling SickGear-NG.py script {kwargs.get("pp_version", "0")} is not current version' + f' {sickgear.NZBGET_SCRIPT_VERSION}, please update.') if sickgear.NZBGET_SKIP_PM and nzbget_call and nzbget_dupekey and nzb_name and show_obj: processTV.process_minimal(nzb_name, show_obj, @@ -3933,9 +3927,9 @@ class HomeProcessMedia(Home): regexp = re.compile(r'(?i)', flags=re.UNICODE) result = regexp.sub('\n', result) if None is not quiet and 1 == int(quiet): - regexp = re.compile(u'(?i)]+>([^<]+)', flags=re.UNICODE) - return u'%s' % regexp.sub(r'\1', result) - return self._generic_message('Postprocessing results', u'
<pre>%s</pre>
      ' % result) + regexp = re.compile('(?i)]+>([^<]+)', flags=re.UNICODE) + return regexp.sub(r'\1', result) + return self._generic_message('Postprocessing results', f'
<pre>{result}</pre>
      ') # noinspection PyPep8Naming def processEpisode(self, dir_name=None, nzb_name=None, process_type=None, **kwargs): @@ -5027,13 +5021,13 @@ class AddShows(Home): normalised = resp else: for item in resp: - normalised.append({u'show': item}) + normalised.append({'show': item}) del resp except TraktAuthException as e: - logger.log(u'Pin authorisation needed to connect to Trakt service: %s' % ex(e), logger.WARNING) + logger.warning(f'Pin authorisation needed to connect to Trakt service: {ex(e)}') error_msg = 'Unauthorized: Get another pin in the Notifications Trakt settings' except TraktException as e: - logger.log(u'Could not connect to Trakt service: %s' % ex(e), logger.WARNING) + logger.warning(f'Could not connect to Trakt service: {ex(e)}') except exceptions_helper.ConnectionSkipException as e: logger.log('Skipping Trakt because of previous failure: %s' % ex(e)) except (IndexError, KeyError): @@ -6031,8 +6025,7 @@ class AddShows(Home): series_pieces = which_series.split('|') if (which_series and root_dir) or (which_series and full_show_path and 1 < len(series_pieces)): if 4 > len(series_pieces): - logger.log('Unable to add show due to show selection. Not enough arguments: %s' % (repr(series_pieces)), - logger.ERROR) + logger.error(f'Unable to add show due to show selection. Not enough arguments: {repr(series_pieces)}') ui.notifications.error('Unknown error. Unable to add show due to problem with show selection.') return self.redirect('/add-shows/import/') @@ -6058,7 +6051,7 @@ class AddShows(Home): # if the dir exists, do 'add existing show' if os.path.isdir(show_dir) and not full_show_path: - ui.notifications.error('Unable to add show', u'Found existing folder: ' + show_dir) + ui.notifications.error('Unable to add show', f'Found existing folder: {show_dir}') return self.redirect( '/add-shows/import?tvid_prodid=%s%s%s&hash_dir=%s%s' % (tvid, TVidProdid.glue, prodid, re.sub('[^a-z]', '', sg_helpers.md5_for_text(show_dir)), @@ -6066,11 +6059,11 @@ class AddShows(Home): # don't create show dir if config says not to if sickgear.ADD_SHOWS_WO_DIR: - logger.log(u'Skipping initial creation due to config.ini setting (add_shows_wo_dir)') + logger.log('Skipping initial creation due to config.ini setting (add_shows_wo_dir)') else: if not helpers.make_dir(show_dir): - logger.log(u'Unable to add show because can\'t create folder: ' + show_dir, logger.ERROR) - ui.notifications.error('Unable to add show', u'Can\'t create folder: ' + show_dir) + logger.error(f"Unable to add show because can't create folder: {show_dir}") + ui.notifications.error('Unable to add show', f"Can't create folder: {show_dir}") return self.redirect('/home/') helpers.chmod_as_parent(show_dir) @@ -6880,8 +6873,7 @@ class Manage(MainHandler): base_dir = dir_map[cur_root_dir] new_show_dir = os.path.join(base_dir, cur_show_dir) # noinspection PyProtectedMember - logger.log(u'For show %s changing dir from %s to %s' % - (show_obj.unique_name, show_obj._location, new_show_dir)) + logger.log(f'For show {show_obj.unique_name} changing dir from {show_obj._location} to {new_show_dir}') else: # noinspection PyProtectedMember new_show_dir = show_obj._location @@ -6960,7 +6952,7 @@ class Manage(MainHandler): prune=new_prune, tag=new_tag, direct_call=True) if cur_errors: - logger.log(u'Errors: ' + str(cur_errors), logger.ERROR) + logger.error(f'Errors: {cur_errors}') errors.append('%s:\n
<ul>' % show_obj.unique_name + ' '.join(
                    ['<li>%s</li>' % error for error in cur_errors]) + '</ul>
      ') @@ -7086,7 +7078,7 @@ class Manage(MainHandler): new_show_id = new_show.split(':') new_tvid = int(new_show_id[0]) if new_tvid not in tv_sources: - logger.log('Skipping %s because target is not a valid source' % show, logger.WARNING) + logger.warning('Skipping %s because target is not a valid source' % show) errors.append('Skipping %s because target is not a valid source' % show) continue try: @@ -7094,7 +7086,7 @@ class Manage(MainHandler): except (BaseException, Exception): show_obj = None if not show_obj: - logger.log('Skipping %s because source is not a valid show' % show, logger.WARNING) + logger.warning('Skipping %s because source is not a valid show' % show) errors.append('Skipping %s because source is not a valid show' % show) continue if 2 == len(new_show_id): @@ -7104,21 +7096,20 @@ class Manage(MainHandler): except (BaseException, Exception): new_show_obj = None if new_show_obj: - logger.log('Skipping %s because target show with that id already exists in db' % show, - logger.WARNING) + logger.warning('Skipping %s because target show with that id already exists in db' % show) errors.append('Skipping %s because target show with that id already exists in db' % show) continue else: new_prodid = None if show_obj.tvid == new_tvid and (not new_prodid or new_prodid == show_obj.prodid): - logger.log('Skipping %s because target same as source' % show, logger.WARNING) + logger.warning('Skipping %s because target same as source' % show) errors.append('Skipping %s because target same as source' % show) continue try: sickgear.show_queue_scheduler.action.switch_show(show_obj=show_obj, new_tvid=new_tvid, new_prodid=new_prodid, force_id=force_id) except (BaseException, Exception) as e: - logger.log('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e)), logger.WARNING) + logger.warning('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e))) errors.append('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e))) return json_dumps(({'result': 'success'}, {'errors': ', '.join(errors)})[0 < len(errors)]) @@ -7174,7 +7165,7 @@ class ManageSearch(Manage): # force it to run the next time it looks if not sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress(): sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG) - logger.log(u'Backlog search forced') + logger.log('Backlog search forced') ui.notifications.message('Backlog search started') time.sleep(5) @@ -7186,7 +7177,7 @@ class ManageSearch(Manage): if not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress(): result = sickgear.recent_search_scheduler.force_run() if result: - logger.log(u'Recent search forced') + logger.log('Recent search forced') ui.notifications.message('Recent search started') time.sleep(5) @@ -7197,7 +7188,7 @@ class ManageSearch(Manage): # force it to run the next time it looks result = sickgear.proper_finder_scheduler.force_run() if result: - logger.log(u'Find propers search forced') + logger.log('Find propers search forced') ui.notifications.message('Find propers search started') time.sleep(5) @@ -7307,7 +7298,7 @@ class ShowTasks(Manage): result = sickgear.show_update_scheduler.force_run() if result: - logger.log(u'Show Update forced') + logger.log('Show Update forced') ui.notifications.message('Forced Show Update started') time.sleep(5) @@ -7658,7 +7649,7 @@ class History(MainHandler): hosts, keys, message = client.check_config(sickgear.EMBY_HOST, sickgear.EMBY_APIKEY) if sickgear.USE_EMBY and hosts: - 
logger.log('Updating Emby watched episode states', logger.DEBUG) + logger.debug('Updating Emby watched episode states') rd = sickgear.ROOT_DIRS.split('|')[1:] \ + [x.split('=')[0] for x in sickgear.EMBY_PARENT_MAPS.split(',') if any(x)] @@ -7744,8 +7735,8 @@ class History(MainHandler): except (BaseException, Exception): continue if mapping: - logger.log('Folder mappings used, the first of %s is [%s] in Emby is [%s] in SickGear' % - (mapped, mapping[0], mapping[1]), logger.DEBUG) + logger.debug(f'Folder mappings used, the first of {mapped} is [{mapping[0]}] in Emby is' + f' [{mapping[1]}] in SickGear') if states: # Prune user removed items that are no longer being returned by API @@ -7767,7 +7758,7 @@ class History(MainHandler): hosts = [x.strip().lower() for x in sickgear.PLEX_SERVER_HOST.split(',')] if sickgear.USE_PLEX and hosts: - logger.log('Updating Plex watched episode states', logger.DEBUG) + logger.debug('Updating Plex watched episode states') from lib.plex import Plex @@ -7785,7 +7776,7 @@ class History(MainHandler): # noinspection HttpUrlsUsage parts = re.search(r'(.*):(\d+)$', urlparse('http://' + re.sub(r'^\w+://', '', cur_host)).netloc) if not parts: - logger.log('Skipping host not in min. host:port format : %s' % cur_host, logger.WARNING) + logger.warning('Skipping host not in min. host:port format : %s' % cur_host) elif parts.group(1): plex.plex_host = parts.group(1) if None is not parts.group(2): @@ -7810,11 +7801,10 @@ class History(MainHandler): idx += 1 - logger.log('Fetched %s of %s played for host : %s' % (len(plex.show_states), played, cur_host), - logger.DEBUG) + logger.debug('Fetched %s of %s played for host : %s' % (len(plex.show_states), played, cur_host)) if mapping: - logger.log('Folder mappings used, the first of %s is [%s] in Plex is [%s] in SickGear' % - (mapped, mapping[0], mapping[1]), logger.DEBUG) + logger.debug(f'Folder mappings used, the first of {mapped} is [{mapping[0]}] in Plex is' + f' [{mapping[1]}] in SickGear') if states: # Prune user removed items that are no longer being returned by API @@ -7866,7 +7856,7 @@ class History(MainHandler): result = helpers.remove_file(cur_result['location']) if result: - logger.log(u'%s file %s' % (result, cur_result['location'])) + logger.log(f'{result} file {cur_result["location"]}') deleted.update({cur_result['tvep_id']: row_show_ids[cur_result['rowid']]}) if row_show_ids[cur_result['rowid']] not in refresh: @@ -8075,7 +8065,7 @@ class ConfigGeneral(Config): # Return a hex digest of the md5, e.g. 
49f68a5c8493ec2c0bf489821c21fc3b app_name = kwargs.get('app_name') app_name = '' if not app_name else ' for [%s]' % app_name - logger.log(u'New API generated%s' % app_name) + logger.log(f'New API generated{app_name}') return result @@ -8134,7 +8124,7 @@ class ConfigGeneral(Config): result['result'] = 'Failed: apikey already exists, try again' else: sickgear.API_KEYS.append([app_name, api_key]) - logger.log('Created apikey for [%s]' % app_name, logger.DEBUG) + logger.debug('Created apikey for [%s]' % app_name) result.update(dict(result='Success: apikey added', added=api_key)) sickgear.USE_API = 1 sickgear.save_config() @@ -8153,7 +8143,7 @@ class ConfigGeneral(Config): result['result'] = 'Failed: key doesn\'t exist' else: sickgear.API_KEYS = [ak for ak in sickgear.API_KEYS if ak[0] and api_key != ak[1]] - logger.log('Revoked [%s] apikey [%s]' % (app_name, api_key), logger.DEBUG) + logger.debug('Revoked [%s] apikey [%s]' % (app_name, api_key)) result.update(dict(result='Success: apikey removed', removed=True)) sickgear.save_config() ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) @@ -8196,7 +8186,7 @@ class ConfigGeneral(Config): with sickgear.show_update_scheduler.lock: sickgear.show_update_scheduler.start_time = datetime.time(hour=sickgear.SHOW_UPDATE_HOUR) except (BaseException, Exception) as e: - logger.log('Could not change Show Update Scheduler time: %s' % ex(e), logger.ERROR) + logger.error('Could not change Show Update Scheduler time: %s' % ex(e)) sickgear.TRASH_REMOVE_SHOW = config.checkbox_to_value(trash_remove_show) sg_helpers.TRASH_REMOVE_SHOW = sickgear.TRASH_REMOVE_SHOW sickgear.TRASH_ROTATE_LOGS = config.checkbox_to_value(trash_rotate_logs) @@ -8231,14 +8221,14 @@ class ConfigGeneral(Config): # not deleted. Deduped list order preservation is key to feature function. my_db = db.DBConnection() sql_result = my_db.select('SELECT DISTINCT tag FROM tv_shows') - new_names = [u'' + v.strip() for v in (show_tags.split(u','), [])[None is show_tags] if v.strip()] + new_names = [v.strip() for v in (show_tags.split(','), [])[None is show_tags] if v.strip()] orphans = [item for item in [v['tag'] for v in sql_result or []] if item not in new_names] cleanser = [] if 0 < len(orphans): cleanser = [item for item in sickgear.SHOW_TAGS if item in orphans or item in new_names] - results += [u'An attempt was prevented to remove a show list group name still in use'] + results += ['An attempt was prevented to remove a show list group name still in use'] dedupe = {} - sickgear.SHOW_TAGS = [dedupe.setdefault(item, item) for item in (cleanser + new_names + [u'Show List']) + sickgear.SHOW_TAGS = [dedupe.setdefault(item, item) for item in (cleanser + new_names + ['Show List']) if item not in dedupe] sickgear.HOME_SEARCH_FOCUS = config.checkbox_to_value(home_search_focus) @@ -8251,7 +8241,7 @@ class ConfigGeneral(Config): sickgear.DATE_PRESET = date_preset if time_preset: sickgear.TIME_PRESET_W_SECONDS = time_preset - sickgear.TIME_PRESET = sickgear.TIME_PRESET_W_SECONDS.replace(u':%S', u'') + sickgear.TIME_PRESET = sickgear.TIME_PRESET_W_SECONDS.replace(':%S', '') sickgear.TIMEZONE_DISPLAY = timezone_display # Web interface @@ -8307,7 +8297,7 @@ class ConfigGeneral(Config): if 0 < len(results): for v in results: - logger.log(v, logger.ERROR) + logger.error(v) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: @@ -8331,7 +8321,7 @@ class ConfigGeneral(Config): pulls = sickgear.update_software_scheduler.action.list_remote_pulls() return json_dumps({'result': 'success', 'pulls': pulls}) except (BaseException, Exception) as e: - logger.log(u'exception msg: ' + ex(e), logger.DEBUG) + logger.debug(f'exception msg: {ex(e)}') return json_dumps({'result': 'fail'}) @staticmethod @@ -8340,7 +8330,7 @@ class ConfigGeneral(Config): branches = sickgear.update_software_scheduler.action.list_remote_branches() return json_dumps({'result': 'success', 'branches': branches, 'current': sickgear.BRANCH or 'main'}) except (BaseException, Exception) as e: - logger.log(u'exception msg: ' + ex(e), logger.DEBUG) + logger.debug(f'exception msg: {ex(e)}') return json_dumps({'result': 'fail'}) @@ -8465,7 +8455,7 @@ class ConfigSearch(Config): sickgear.TORRENT_LABEL = torrent_label sickgear.TORRENT_LABEL_VAR = config.to_int((0, torrent_label_var)['rtorrent' == torrent_method], 1) if not (0 <= sickgear.TORRENT_LABEL_VAR <= 5): - logger.log('Setting rTorrent custom%s is not 0-5, defaulting to custom1' % torrent_label_var, logger.DEBUG) + logger.debug('Setting rTorrent custom%s is not 0-5, defaulting to custom1' % torrent_label_var) sickgear.TORRENT_LABEL_VAR = 1 sickgear.TORRENT_VERIFY_CERT = config.checkbox_to_value(torrent_verify_cert) sickgear.TORRENT_PATH = torrent_path @@ -8478,7 +8468,7 @@ class ConfigSearch(Config): if 0 < len(results): for x in results: - logger.log(x, logger.ERROR) + logger.error(x) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: @@ -8605,7 +8595,7 @@ class ConfigMediaProcess(Config): if 0 < len(results): for x in results: - logger.log(x, logger.ERROR) + logger.error(x) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: @@ -8679,7 +8669,7 @@ class ConfigMediaProcess(Config): except (BaseException, Exception) as e: msg = ex(e) - logger.log(u'Rar Not Supported: %s' % msg, logger.ERROR) + logger.error(f'Rar Not Supported: {msg}') return 'not supported' @@ -9019,7 +9009,7 @@ class ConfigProviders(Config): if 0 < len(results): for x in results: - logger.log(x, logger.ERROR) + logger.error(x) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) @@ -9286,7 +9276,7 @@ class ConfigNotifications(Config): if 0 < len(results): for x in results: - logger.log(x, logger.ERROR) + logger.error(x) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: @@ -9341,7 +9331,7 @@ class ConfigSubtitles(Config): if 0 < len(results): for x in results: - logger.log(x, logger.ERROR) + logger.error(x) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: @@ -9374,7 +9364,7 @@ class ConfigAnime(Config): if 0 < len(results): for x in results: - logger.log(x, logger.ERROR) + logger.error(x) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: diff --git a/sickgear/webserveInit.py b/sickgear/webserveInit.py index 025afaa6..8a46f1d4 100644 --- a/sickgear/webserveInit.py +++ b/sickgear/webserveInit.py @@ -74,7 +74,7 @@ class WebServer(threading.Thread): # If either the HTTPS certificate or key do not exist, make some self-signed ones. if make_cert: if not create_https_certificates(self.https_cert, self.https_key): - logger.log(u'Unable to create CERT/KEY files, disabling HTTPS') + logger.log('Unable to create CERT/KEY files, disabling HTTPS') update_cfg |= False is not sickgear.ENABLE_HTTPS sickgear.ENABLE_HTTPS = False self.enable_https = False @@ -82,7 +82,7 @@ class WebServer(threading.Thread): update_cfg = True if not (os.path.isfile(self.https_cert) and os.path.isfile(self.https_key)): - logger.log(u'Disabled HTTPS because of missing CERT and KEY files', logger.WARNING) + logger.warning('Disabled HTTPS because of missing CERT and KEY files') update_cfg |= False is not sickgear.ENABLE_HTTPS sickgear.ENABLE_HTTPS = False self.enable_https = False @@ -231,7 +231,7 @@ class WebServer(threading.Thread): protocol, ssl_options = (('http', None), ('https', {'certfile': self.https_cert, 'keyfile': self.https_key}))[self.enable_https] - logger.log(u'Starting SickGear on %s://%s:%s/' % (protocol, self.options['host'], self.options['port'])) + logger.log(f'Starting SickGear on {protocol}://{self.options["host"]}:{self.options["port"]}/') # python 3 needs to start event loop first import asyncio @@ -244,8 +244,7 @@ class WebServer(threading.Thread): xheaders=sickgear.HANDLE_REVERSE_PROXY, protocol=protocol) except (BaseException, Exception): etype, evalue, etb = exc_info() - logger.log('Could not start webserver on %s. Exception: %s, Error: %s' % ( - self.options['port'], etype, evalue), logger.ERROR) + logger.error(f'Could not start webserver on {self.options["port"]}. Exception: {etype}, Error: {evalue}') return self.io_loop = IOLoop.current() @@ -271,7 +270,7 @@ class WebServer(threading.Thread): getattr(s, nh)() sickgear.classes.loading_msg.reset() self.io_loop.add_callback(d_f, self, new_handler) - logger.log('Switching HTTP Server handlers to %s' % new_handler, logger.DEBUG) + logger.debug('Switching HTTP Server handlers to %s' % new_handler) def shut_down(self): self.alive = False diff --git a/tests/migration_tests.py b/tests/migration_tests.py index 2a2d3a6c..a03cd27c 100644 --- a/tests/migration_tests.py +++ b/tests/migration_tests.py @@ -109,22 +109,18 @@ class OldInitialSchema(db.SchemaUpgrade): if cur_db_version < MIN_DB_VERSION: logger.log_error_and_exit( - u'Your database version (' - + str(cur_db_version) - + ') is too old to migrate from what this version of SickGear supports (' - + str(MIN_DB_VERSION) + ').' + '\n' - + 'Upgrade using a previous version (tag) build 496 to build 501 of SickGear first or' - ' remove database file to begin fresh.' + f'Your database version ({str(cur_db_version)}) is too old to migrate from what' + f' this version of SickGear supports ({str(MIN_DB_VERSION)}).\n' + f'Upgrade using a previous version (tag) build 496 to build 501 of SickGear first' + f' or remove database file to begin fresh.' ) if cur_db_version > MAX_DB_VERSION: logger.log_error_and_exit( - u'Your database version (' - + str(cur_db_version) - + ') has been incremented past what this version of SickGear supports (' - + str(MAX_DB_VERSION) + ').' + '\n' - + 'If you have used other forks of SickGear,' - ' your database may be unusable due to their modifications.' 
+ f'Your database version ({str(cur_db_version)}) has been incremented past what' + f' this version of SickGear supports ({str(MAX_DB_VERSION)}).\n' + f'If you have used other forks of SickGear,' + f' your database may be unusable due to their modifications.' ) return self.call_check_db_version() diff --git a/tests/name_parser_tests.py b/tests/name_parser_tests.py index 6e052375..05cefe99 100644 --- a/tests/name_parser_tests.py +++ b/tests/name_parser_tests.py @@ -367,16 +367,16 @@ combination_test_cases = [ ] unicode_test_cases = [ - (u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv', + ('The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv', parser.ParseResult( - u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv', - u'The Big Bang Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', u'SHELDON', + 'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv', + 'The Big Bang Theory', 2, [7], 'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', 'SHELDON', version=-1) ), ('The.Big.Bang.Theory.2x07.The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv', parser.ParseResult( - u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv', - u'The Big Bang Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', u'SHELDON', + 'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv', + 'The Big Bang Theory', 2, [7], 'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', 'SHELDON', version=-1) ), ] diff --git a/tests/newznab_tests.py b/tests/newznab_tests.py index 25bacb50..a5afea27 100644 --- a/tests/newznab_tests.py +++ b/tests/newznab_tests.py @@ -39,12 +39,12 @@ item_parse_test_cases = [ ('Show.Name.S02E04.720p.HDTV.x264-GROUP', 'https://test.h')), (('Show.Name.S02E05.720p.HDTV.x264-GROUP-JUNK[JUNK]', 'https://test.h'), ('Show.Name.S02E05.720p.HDTV.x264-GROUP', 'https://test.h')), - ((u'Show.Name.S02E06.720p.HDTV.x264-GROUP-JUNK[JUNK帝]', 'https://test.h'), - (u'Show.Name.S02E06.720p.HDTV.x264-GROUP', 'https://test.h')), - ((u'Show.Name.S02E07-EpName帝.720p.HDTV.x264-GROUP帝-JUNK[JUNK帝]', 'https://test.h'), - (u'Show.Name.S02E07-EpName帝.720p.HDTV.x264-GROUP帝', 'https://test.h')), - ((u'[grp 帝] Show Name - 11 [1024x576 h264 AAC ger-sub][123456].mp4', 'https://test.h'), - (u'[grp.帝].Show.Name.-.11.[1024x576.h264.AAC.ger-sub][123456]', 'https://test.h')), + (('Show.Name.S02E06.720p.HDTV.x264-GROUP-JUNK[JUNK帝]', 'https://test.h'), + ('Show.Name.S02E06.720p.HDTV.x264-GROUP', 'https://test.h')), + (('Show.Name.S02E07-EpName帝.720p.HDTV.x264-GROUP帝-JUNK[JUNK帝]', 'https://test.h'), + ('Show.Name.S02E07-EpName帝.720p.HDTV.x264-GROUP帝', 'https://test.h')), + (('[grp 帝] Show Name - 11 [1024x576 h264 AAC ger-sub][123456].mp4', 'https://test.h'), + ('[grp.帝].Show.Name.-.11.[1024x576.h264.AAC.ger-sub][123456]', 'https://test.h')), ] size_test_cases = [ diff --git a/tests/scene_helpers_tests.py b/tests/scene_helpers_tests.py index e49e27aa..29f80deb 100644 --- a/tests/scene_helpers_tests.py +++ b/tests/scene_helpers_tests.py @@ -100,8 +100,8 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase): sickgear.showDict[s.sid_int] = s scene_exceptions.retrieve_exceptions() name_cache.build_name_cache() - self.assertEqual(scene_exceptions.get_scene_exception_by_name(u'ブラック・ラグーン'), [1, 79604, -1]) - 
self.assertEqual(scene_exceptions.get_scene_exception_by_name(u'Burakku Ragūn'), [1, 79604, -1]) + self.assertEqual(scene_exceptions.get_scene_exception_by_name('ブラック・ラグーン'), [1, 79604, -1]) + self.assertEqual(scene_exceptions.get_scene_exception_by_name('Burakku Ragūn'), [1, 79604, -1]) self.assertEqual(scene_exceptions.get_scene_exception_by_name('Rokka no Yuusha'), [1, 295243, -1]) def test_sceneExceptionByNameEmpty(self): diff --git a/tests/test_lib.py b/tests/test_lib.py index e3fe8be3..58b07ac8 100644 --- a/tests/test_lib.py +++ b/tests/test_lib.py @@ -42,10 +42,10 @@ TESTDBNAME = 'sickbeard.db' TESTCACHEDBNAME = 'cache.db' TESTFAILEDDBNAME = 'failed.db' -SHOWNAME = u'show name' +SHOWNAME = 'show name' SEASON = 4 EPISODE = 2 -FILENAME = u'show name - s0' + str(SEASON) + 'e0' + str(EPISODE) + '.mkv' +FILENAME = f'show name - s0{SEASON}e0{EPISODE}.mkv' FILEDIR = os.path.join(TESTDIR, SHOWNAME) FILEPATH = os.path.join(FILEDIR, FILENAME) diff --git a/tests/xem_tests.py b/tests/xem_tests.py index 81fa0cfb..1d3cb6d6 100644 --- a/tests/xem_tests.py +++ b/tests/xem_tests.py @@ -61,12 +61,12 @@ class XEMBasicTests(test.SickbeardTestDBCase): curRegex = '^' + escaped_name + r'\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)' \ r'[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+|(?:\d{1,3}.+\d{1,}[a-zA-Z]{2}' \ r'\W+[a-zA-Z]{3,}\W+\d{4}.+))' - # print(u"Checking if show " + name + " matches " + curRegex) + # print("Checking if show " + name + " matches " + curRegex) # noinspection PyUnusedLocal match = re.search(curRegex, name, re.I) # if match: - # print(u"Matched " + curRegex + " to " + name) + # print("Matched " + curRegex + " to " + name) if '__main__' == __name__: From 19f0a951e5ab35cd9bac07f913432de5aa3099d8 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Wed, 8 Mar 2023 17:36:24 +0000 Subject: [PATCH 14/21] Refactor `timestamp_near` to `SGDatetime.timestamp_near`. --- CHANGES.md | 1 + sickgear/clients/download_station.py | 5 ++--- sickgear/clients/qbittorrent.py | 5 ++--- sickgear/db.py | 4 ++-- sickgear/helpers.py | 8 +++---- sickgear/image_cache.py | 6 +++--- sickgear/logger.py | 4 ++-- sickgear/processTV.py | 4 ++-- sickgear/properFinder.py | 6 +++--- sickgear/providers/generic.py | 12 +++++------ sickgear/providers/newznab.py | 4 ++-- sickgear/scene_exceptions.py | 8 +++---- sickgear/scene_numbering.py | 6 +++--- sickgear/search_backlog.py | 6 +++--- sickgear/sgdatetime.py | 31 ++++++++++++++++++---------- sickgear/tv.py | 4 ++-- sickgear/tvcache.py | 8 +++---- sickgear/webserve.py | 18 ++++++++-------- 18 files changed, 74 insertions(+), 66 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index df558de5..715b4344 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -14,6 +14,7 @@ * Add logging around the restart/shutdown event * Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) +* Refactor `timestamp_near` to `SGDatetime.timestamp_near` ### 3.27.11 (2023-03-06 23:40:00 UTC) diff --git a/sickgear/clients/download_station.py b/sickgear/clients/download_station.py index 2752ab48..42111909 100644 --- a/sickgear/clients/download_station.py +++ b/sickgear/clients/download_station.py @@ -17,13 +17,12 @@ # Uses the Synology Download Station API: # http://download.synology.com/download/Document/DeveloperGuide/Synology_Download_Station_Web_API.pdf -from datetime import datetime import re import time from .generic import GenericClient from .. 
import logger -from ..sgdatetime import timestamp_near +from ..sgdatetime import SGDatetime import sickgear from _23 import unquote_plus @@ -298,7 +297,7 @@ class DownloadStationAPI(GenericClient): if 1 < self._task_version and sickgear.TORRENT_PATH: params['destination'] = re.sub(r'^/(volume\d*/)?', '', sickgear.TORRENT_PATH) - task_stamp = int(timestamp_near(datetime.now())) + task_stamp = SGDatetime.timestamp_near() response = self._client_request('create', t_params=params, files=files) # noinspection PyUnresolvedReferences if response and response.get('success'): diff --git a/sickgear/clients/qbittorrent.py b/sickgear/clients/qbittorrent.py index 6f571128..36737f59 100644 --- a/sickgear/clients/qbittorrent.py +++ b/sickgear/clients/qbittorrent.py @@ -14,14 +14,13 @@ # You should have received a copy of the GNU General Public License # along with SickGear. If not, see . -from datetime import datetime import re import time from .generic import GenericClient from .. import logger from ..helpers import get_url, try_int -from ..sgdatetime import timestamp_near +from ..sgdatetime import SGDatetime import sickgear from requests.exceptions import HTTPError @@ -372,7 +371,7 @@ class QbittorrentAPI(GenericClient): else: kwargs = dict(post_data=params, files={'torrents': ('%s.torrent' % data.name, data.content)}) - task_stamp = int(timestamp_near(datetime.now())) + task_stamp = SGDatetime.timestamp_near() response = self._client_request(('torrents/add', 'command/%s' % cmd)[not self.api_ns], **kwargs) if True is response: diff --git a/sickgear/db.py b/sickgear/db.py index c1efaed1..b20485fa 100644 --- a/sickgear/db.py +++ b/sickgear/db.py @@ -28,7 +28,7 @@ from exceptions_helper import ex import sickgear from . import logger, sgdatetime -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime from sg_helpers import make_path, compress_file, remove_file_perm, scantree @@ -841,7 +841,7 @@ def backup_all_dbs(target, compress=True, prefer_7z=True): my_db = DBConnection('cache.db') last_backup = my_db.select('SELECT time FROM lastUpdate WHERE provider = ?', ['sickgear_db_backup']) if last_backup: - now_stamp = int(timestamp_near(datetime.datetime.now())) + now_stamp = SGDatetime.timestamp_near() the_time = int(last_backup[0]['time']) # only backup every 23 hours if now_stamp - the_time < 60 * 60 * 23: diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 4cb670a3..4c78e787 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -34,7 +34,7 @@ import sickgear from . 
import db, logger, notifiers from .common import cpu_presets, mediaExtensions, Overview, Quality, statusStrings, subtitleExtensions, \ ARCHIVED, DOWNLOADED, FAILED, IGNORED, SKIPPED, SNATCHED_ANY, SUBTITLED, UNAIRED, UNKNOWN, WANTED -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime from lib.tvinfo_base.exceptions import * from exceptions_helper import ex, MultipleShowObjectsException @@ -1031,7 +1031,7 @@ def clear_cache(force=False): """ # clean out cache directory, remove everything > 12 hours old dirty = None - del_time = int(timestamp_near((datetime.datetime.now() - datetime.timedelta(hours=12)))) + del_time = SGDatetime.timestamp_near(td=datetime.timedelta(hours=12)) direntry_args = dict(follow_symlinks=False) for direntry in scantree(sickgear.CACHE_DIR, ['images|rss|zoneinfo'], follow_symlinks=True): if direntry.is_file(**direntry_args) and (force or del_time > direntry.stat(**direntry_args).st_mtime): @@ -1342,7 +1342,7 @@ def delete_not_changed_in(paths, days=30, minutes=0): :param minutes: Purge files not modified in this number of minutes (default: 0 minutes) :return: tuple; number of files that qualify for deletion, number of qualifying files that failed to be deleted """ - del_time = int(timestamp_near((datetime.datetime.now() - datetime.timedelta(days=days, minutes=minutes)))) + del_time = SGDatetime.timestamp_near(td=datetime.timedelta(days=days, minutes=minutes)) errors = 0 qualified = 0 for cur_path in (paths, [paths])[not isinstance(paths, list)]: @@ -1367,7 +1367,7 @@ def set_file_timestamp(filename, min_age=3, new_time=None): :param new_time: :type new_time: None or int """ - min_time = int(timestamp_near((datetime.datetime.now() - datetime.timedelta(days=min_age)))) + min_time = SGDatetime.timestamp_near(td=datetime.timedelta(days=min_age)) try: if os.path.isfile(filename) and os.path.getmtime(filename) < min_time: os.utime(filename, new_time) diff --git a/sickgear/image_cache.py b/sickgear/image_cache.py index 10fe08da..09f8910c 100644 --- a/sickgear/image_cache.py +++ b/sickgear/image_cache.py @@ -26,7 +26,7 @@ import sickgear import sg_helpers from . import db, logger from .metadata.generic import GenericMetadata -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime from .indexers.indexer_config import TVINFO_TVDB, TVINFO_TVMAZE, TVINFO_TMDB, TVINFO_IMDB from six import itervalues, iteritems @@ -465,7 +465,7 @@ class ImageCache(object): minutes_iv = 60 * 3 # daily_interval = 60 * 60 * 23 iv = minutes_iv - now_stamp = int(timestamp_near(datetime.datetime.now())) + now_stamp = SGDatetime.timestamp_near() the_time = int(sql_result[0]['time']) return now_stamp - the_time > iv @@ -482,7 +482,7 @@ class ImageCache(object): """ my_db = db.DBConnection('cache.db') my_db.upsert('lastUpdate', - {'time': int(timestamp_near(datetime.datetime.now()))}, + {'time': SGDatetime.timestamp_near()}, {'provider': 'imsg_%s_%s' % ((image_type, self.FANART)[None is image_type], provider)}) def _cache_image_from_file(self, image_path, img_type, tvid, prodid, prefix='', move_file=False): diff --git a/sickgear/logger.py b/sickgear/logger.py index 702fdb8f..1ecbca02 100644 --- a/sickgear/logger.py +++ b/sickgear/logger.py @@ -31,7 +31,7 @@ from logging.handlers import TimedRotatingFileHandler import sickgear from . 
import classes -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime from sg_helpers import md5_for_text, remove_file_perm # noinspection PyUnreachableCode @@ -198,7 +198,7 @@ class SBRotatingLogHandler(object): mem_key = 'logger' for to_log in log_list: log_id = md5_for_text(to_log) - now = int(timestamp_near(datetime.datetime.now())) + now = SGDatetime.timestamp_near() expired = now > sickgear.MEMCACHE.get(mem_key, {}).get(log_id, 0) sickgear.MEMCACHE[mem_key] = {} sickgear.MEMCACHE[mem_key][log_id] = 2 + now diff --git a/sickgear/processTV.py b/sickgear/processTV.py index f6a0a5bb..16326af2 100644 --- a/sickgear/processTV.py +++ b/sickgear/processTV.py @@ -33,7 +33,7 @@ from . import common, db, failedProcessor, helpers, logger, notifiers, postProce from .common import SNATCHED_ANY from .history import reset_status from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime from six import iteritems, iterkeys, string_types, text_type from sg_helpers import long_path, scantree @@ -571,7 +571,7 @@ class ProcessTVShow(object): archives = [os.path.basename(x) for x in unused_files] if unused_files: for f in unused_files: - archive_history.setdefault(f, int(timestamp_near(datetime.datetime.utcnow()))) + archive_history.setdefault(f, SGDatetime.timestamp_near(datetime.datetime.utcnow())) if init_history_cnt != len(archive_history): try: diff --git a/sickgear/properFinder.py b/sickgear/properFinder.py index 12a07326..1397e06a 100644 --- a/sickgear/properFinder.py +++ b/sickgear/properFinder.py @@ -30,7 +30,7 @@ from .common import ARCHIVED, FAILED, DOWNLOADED, SNATCHED_ANY, SNATCHED_PROPER, NeededQualities, Quality from .history import dateFormat from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime from _23 import map_consume from six import string_types @@ -689,10 +689,10 @@ def _set_last_proper_search(when): if 0 == len(sql_result): my_db.action('INSERT INTO info (last_backlog, last_indexer, last_proper_search) VALUES (?,?,?)', - [0, 0, int(timestamp_near(when))]) + [0, 0, SGDatetime.timestamp_near(when)]) else: # noinspection SqlConstantCondition - my_db.action('UPDATE info SET last_proper_search=%s WHERE 1=1' % int(timestamp_near(when))) + my_db.action('UPDATE info SET last_proper_search=%s WHERE 1=1' % SGDatetime.timestamp_near(when)) def next_proper_timeleft(): diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py index 17c00f20..dab1fc5c 100644 --- a/sickgear/providers/generic.py +++ b/sickgear/providers/generic.py @@ -39,7 +39,7 @@ from ..helpers import maybe_plural, remove_file_perm from ..name_parser.parser import InvalidNameException, InvalidShowException, NameParser from ..scene_exceptions import has_season_exceptions from ..show_name_helpers import get_show_names_all_possible -from ..sgdatetime import SGDatetime, timestamp_near +from ..sgdatetime import SGDatetime from ..tv import TVEpisode, TVShow from cfscrape import CloudflareScraper @@ -112,7 +112,7 @@ class ProviderFailList(object): date_time = datetime.datetime.combine(fail_date, datetime.time(hour=fail_hour)) if ProviderFailTypes.names[e.fail_type] not in fail_dict.get(date_time, {}): if isinstance(e.fail_time, datetime.datetime): - value = timestamp_near(e.fail_time) + value = SGDatetime.timestamp_near(e.fail_time, return_int=False) else: value = 
SGDatetime.timestamp_far(e.fail_time) default = {'date': str(fail_date), 'date_time': date_time, @@ -178,7 +178,7 @@ class ProviderFailList(object): cl = [] for f in self._fails: if isinstance(f.fail_time, datetime.datetime): - value = int(timestamp_near(f.fail_time)) + value = SGDatetime.timestamp_near(f.fail_time) else: value = SGDatetime.timestamp_far(f.fail_time) cl.append(['INSERT OR IGNORE INTO provider_fails (prov_name, fail_type, fail_code, fail_time) ' @@ -211,7 +211,7 @@ class ProviderFailList(object): my_db = db.DBConnection('cache.db') if my_db.has_table('provider_fails'): # noinspection PyCallByClass,PyTypeChecker - time_limit = int(timestamp_near(datetime.datetime.now() - datetime.timedelta(days=28))) + time_limit = SGDatetime.timestamp_near(td=datetime.timedelta(days=28)) my_db.action('DELETE FROM provider_fails WHERE fail_time < ?', [time_limit]) except (BaseException, Exception): pass @@ -340,7 +340,7 @@ class GenericProvider(object): self._failure_time = value if changed_val: if isinstance(value, datetime.datetime): - value = int(timestamp_near(value)) + value = SGDatetime.timestamp_near(value) elif value: # noinspection PyCallByClass value = SGDatetime.timestamp_far(value) @@ -370,7 +370,7 @@ class GenericProvider(object): self._tmr_limit_time = value if changed_val: if isinstance(value, datetime.datetime): - value = int(timestamp_near(value)) + value = SGDatetime.timestamp_near(value) elif value: # noinspection PyCallByClass value = SGDatetime.timestamp_far(value) diff --git a/sickgear/providers/newznab.py b/sickgear/providers/newznab.py index fb7cc782..b9cac5e2 100644 --- a/sickgear/providers/newznab.py +++ b/sickgear/providers/newznab.py @@ -31,7 +31,7 @@ from ..common import NeededQualities, Quality from ..helpers import remove_non_release_groups from ..indexers.indexer_config import * from ..network_timezones import SG_TIMEZONE -from ..sgdatetime import SGDatetime, timestamp_near +from ..sgdatetime import SGDatetime from ..search import get_aired_in_season, get_wanted_qualities from ..show_name_helpers import get_show_names from ..scene_exceptions import has_season_exceptions @@ -217,7 +217,7 @@ class NewznabProvider(generic.NZBProvider): try: my_db = db.DBConnection('cache.db') if isinstance(value, datetime.datetime): - save_value = int(timestamp_near(value)) + save_value = SGDatetime.timestamp_near(value) else: save_value = SGDatetime.timestamp_far(value, default=0) my_db.action('INSERT OR REPLACE INTO "lastrecentsearch" (name, datetime) VALUES (?,?)', diff --git a/sickgear/scene_exceptions.py b/sickgear/scene_exceptions.py index 3e4064fe..148285cc 100644 --- a/sickgear/scene_exceptions.py +++ b/sickgear/scene_exceptions.py @@ -31,7 +31,7 @@ from . 
import db, helpers, logger, name_cache from .anime import create_anidb_obj from .classes import OrderedDefaultdict from .indexers.indexer_config import TVINFO_TVDB -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime import lib.rarfile.rarfile as rarfile @@ -68,9 +68,9 @@ def should_refresh(name, max_refresh_age_secs=86400, remaining=False): if rows: last_refresh = int(rows[0]['last_refreshed']) if remaining: - time_left = (last_refresh + max_refresh_age_secs - int(timestamp_near(datetime.datetime.now()))) + time_left = (last_refresh + max_refresh_age_secs - SGDatetime.timestamp_near()) return (0, time_left)[time_left > 0] - return int(timestamp_near(datetime.datetime.now())) > last_refresh + max_refresh_age_secs + return SGDatetime.timestamp_near() > last_refresh + max_refresh_age_secs return True @@ -82,7 +82,7 @@ def set_last_refresh(name): """ my_db = db.DBConnection() my_db.upsert('scene_exceptions_refresh', - {'last_refreshed': int(timestamp_near(datetime.datetime.now()))}, + {'last_refreshed': SGDatetime.timestamp_near()}, {'list': name}) diff --git a/sickgear/scene_numbering.py b/sickgear/scene_numbering.py index 885fc527..2afc2914 100644 --- a/sickgear/scene_numbering.py +++ b/sickgear/scene_numbering.py @@ -30,7 +30,7 @@ import sickgear from . import db, logger from .helpers import try_int from .scene_exceptions import xem_ids_list -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime # noinspection PyUnreachableCode if False: @@ -794,7 +794,7 @@ def xem_refresh(tvid, prodid, force=False): """, [tvid, prodid]) if sql_result: last_refresh = int(sql_result[0]['last_refreshed']) - refresh = int(timestamp_near(datetime.datetime.now())) > last_refresh + max_refresh_age_secs + refresh = SGDatetime.timestamp_near() > last_refresh + max_refresh_age_secs else: refresh = True @@ -803,7 +803,7 @@ def xem_refresh(tvid, prodid, force=False): # mark refreshed my_db.upsert('xem_refresh', - dict(last_refreshed=int(timestamp_near(datetime.datetime.now()))), + dict(last_refreshed=SGDatetime.timestamp_near()), dict(indexer=tvid, indexer_id=prodid)) try: diff --git a/sickgear/search_backlog.py b/sickgear/search_backlog.py index e2936f52..ecd9b369 100644 --- a/sickgear/search_backlog.py +++ b/sickgear/search_backlog.py @@ -25,7 +25,7 @@ from . 
import db, logger, scheduler, search_queue, ui from .helpers import find_show_by_id from .providers.generic import GenericProvider from .search import wanted_episodes -from .sgdatetime import SGDatetime, timestamp_near +from .sgdatetime import SGDatetime from .tv import TVidProdid, TVEpisode, TVShow from six import iteritems, itervalues, moves @@ -344,7 +344,7 @@ class BacklogSearcher(object): last_run_time = 1 else: last_run_time = int(sql_result[0]['last_run_backlog']) - if last_run_time > int(timestamp_near(datetime.datetime.now())): + if last_run_time > SGDatetime.timestamp_near(): last_run_time = 1 return last_run_time @@ -356,7 +356,7 @@ class BacklogSearcher(object): sql_result = my_db.select('SELECT * FROM info') if isinstance(when, datetime.datetime): - when = int(timestamp_near(when)) + when = SGDatetime.timestamp_near(when) else: when = SGDatetime.timestamp_far(when, default=0) if 0 == len(sql_result): diff --git a/sickgear/sgdatetime.py b/sickgear/sgdatetime.py index 1e6ffaf0..5beada3a 100644 --- a/sickgear/sgdatetime.py +++ b/sickgear/sgdatetime.py @@ -282,16 +282,25 @@ class SGDatetime(datetime.datetime): finally: return (default, timestamp)[isinstance(timestamp, (float, integer_types))] + @static_or_instance + def timestamp_near(self, + dt=None, # type: Optional[SGDatetime, datetime.datetime] + td=None, # type: Optional[datetime.timedelta] + return_int=True # type: bool + ): + # type: (...) -> Union[float, integer_types] + """ + Use `timestamp_near` for a timestamp in the near future or near past -# noinspection PyUnreachableCode -if False: - # just to trick pycharm in correct type detection - # noinspection PyUnusedLocal - def timestamp_near(d_t): - # type: (datetime.datetime) -> float - pass + Raises exception if dt cannot be converted to int - -# py3 native timestamp uses milliseconds -# noinspection PyRedeclaration -timestamp_near = datetime.datetime.timestamp + td is timedelta to subtract from datetime + """ + obj = (dt, self)[self is not None] # type: datetime.datetime + if None is obj: + obj = datetime.datetime.now() + if isinstance(td, datetime.timedelta): + obj -= td + if not return_int: + return datetime.datetime.timestamp(obj) + return int(datetime.datetime.timestamp(obj)) diff --git a/sickgear/tv.py b/sickgear/tv.py index b9e6d5db..8073d907 100644 --- a/sickgear/tv.py +++ b/sickgear/tv.py @@ -52,7 +52,7 @@ from .helpers import try_float, try_int from .indexermapper import del_mapping, MapStatus, save_mapping from .indexers.indexer_config import TVINFO_IMDB, TVINFO_TMDB, TVINFO_TRAKT, TVINFO_TVDB, TVINFO_TVMAZE, TVINFO_TVRAGE from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser -from .sgdatetime import SGDatetime, timestamp_near +from .sgdatetime import SGDatetime from .tv_base import TVEpisodeBase, TVShowBase from lib import imdbpie, subliminal @@ -1530,7 +1530,7 @@ class TVShow(TVShowBase): self._last_found_on_indexer = self.last_found_on_indexer my_db = db.DBConnection() # noinspection PyUnresolvedReferences - last_check = int(timestamp_near(datetime.datetime.now())) + last_check = SGDatetime.timestamp_near() # in case of flag change (+/-) don't change last_check date if abs(v) == abs(self._not_found_count): sql_result = my_db.select( diff --git a/sickgear/tvcache.py b/sickgear/tvcache.py index 3bab265e..d7fbd365 100644 --- a/sickgear/tvcache.py +++ b/sickgear/tvcache.py @@ -27,7 +27,7 @@ from .classes import SearchResult from .common import Quality from .name_parser.parser import InvalidNameException, InvalidShowException, 
NameParser, ParseResult from .rssfeeds import RSSFeeds -from .sgdatetime import timestamp_near +from .sgdatetime import SGDatetime from .tv import TVEpisode # noinspection PyUnreachableCode @@ -172,7 +172,7 @@ class TVCache(object): if sql_result: last_time = int(sql_result[0]['time']) - if last_time > int(timestamp_near(datetime.datetime.now())): + if last_time > SGDatetime.timestamp_near(): last_time = 0 else: last_time = 0 @@ -190,7 +190,7 @@ class TVCache(object): if sql_result: last_time = int(sql_result[0]['time']) - if last_time > int(timestamp_near(datetime.datetime.now())): + if last_time > SGDatetime.timestamp_near(): last_time = 0 else: last_time = 0 @@ -300,7 +300,7 @@ class TVCache(object): episode_text = '|%s|' % '|'.join(map(str, episode_numbers)) # get the current timestamp - cur_timestamp = int(timestamp_near(datetime.datetime.now())) + cur_timestamp = SGDatetime.timestamp_near() # get quality of release quality = parse_result.quality diff --git a/sickgear/webserve.py b/sickgear/webserve.py index 1a97fe8e..82cf7697 100644 --- a/sickgear/webserve.py +++ b/sickgear/webserve.py @@ -66,7 +66,7 @@ from .providers import newznab, rsstorrent from .scene_numbering import get_scene_absolute_numbering_for_show, get_scene_numbering_for_show, \ get_xem_absolute_numbering_for_show, get_xem_numbering_for_show, set_scene_numbering_helper from .search_backlog import FORCED_BACKLOG -from .sgdatetime import SGDatetime, timestamp_near +from .sgdatetime import SGDatetime from .show_name_helpers import abbr_showname from .show_updater import clean_ignore_require_words @@ -674,7 +674,7 @@ class RepoHandler(BaseStaticFileHandler): def get_watchedstate_updater_addon_xml(self): mem_key = 'kodi_xml' - if int(timestamp_near(datetime.datetime.now())) < sickgear.MEMCACHE.get(mem_key, {}).get('last_update', 0): + if SGDatetime.timestamp_near() < sickgear.MEMCACHE.get(mem_key, {}).get('last_update', 0): return sickgear.MEMCACHE.get(mem_key).get('data') filename = 'addon%s.xml' % self.kodi_include @@ -682,7 +682,7 @@ class RepoHandler(BaseStaticFileHandler): 'service.sickgear.watchedstate.updater', filename), 'r', encoding='utf8') as fh: xml = fh.read().strip() % dict(ADDON_VERSION=self.get_addon_version(self.kodi_include)) - sickgear.MEMCACHE[mem_key] = dict(last_update=30 + int(timestamp_near(datetime.datetime.now())), data=xml) + sickgear.MEMCACHE[mem_key] = dict(last_update=30 + SGDatetime.timestamp_near(), data=xml) return xml @staticmethod @@ -696,7 +696,7 @@ class RepoHandler(BaseStaticFileHandler): Must use an arg here instead of `self` due to static call use case from external class """ mem_key = 'kodi_ver' - if int(timestamp_near(datetime.datetime.now())) < sickgear.MEMCACHE.get(mem_key, {}).get('last_update', 0): + if SGDatetime.timestamp_near() < sickgear.MEMCACHE.get(mem_key, {}).get('last_update', 0): return sickgear.MEMCACHE.get(mem_key).get('data') filename = 'service%s.py' % kodi_include @@ -704,7 +704,7 @@ class RepoHandler(BaseStaticFileHandler): 'service.sickgear.watchedstate.updater', filename), 'r', encoding='utf8') as fh: version = re.findall(r'ADDON_VERSION\s*?=\s*?\'([^\']+)', fh.read())[0] - sickgear.MEMCACHE[mem_key] = dict(last_update=30 + int(timestamp_near(datetime.datetime.now())), data=version) + sickgear.MEMCACHE[mem_key] = dict(last_update=30 + SGDatetime.timestamp_near(), data=version) return version def render_kodi_repo_addon_xml(self): @@ -1465,7 +1465,7 @@ r.close() continue if bname in ep_results: - date_watched = now = int(timestamp_near(datetime.datetime.now())) + 
date_watched = now = SGDatetime.timestamp_near() if 1500000000 < date_watched: date_watched = helpers.try_int(float(v.get('date_watched'))) @@ -9589,8 +9589,8 @@ class CachedImages(MainHandler): dummy_file = '%s.%s.dummy' % (os.path.splitext(filename)[0], source) if os.path.isfile(dummy_file): if os.stat(dummy_file).st_mtime \ - < (int(timestamp_near((datetime.datetime.now() - - datetime.timedelta(days=days, minutes=minutes))))): + < (SGDatetime.timestamp_near(datetime.datetime.now() + - datetime.timedelta(days=days, minutes=minutes))): CachedImages.delete_dummy_image(dummy_file) else: result = False @@ -9695,7 +9695,7 @@ class CachedImages(MainHandler): """ if not os.path.isfile(filename) or \ os.stat(filename).st_mtime < \ - (int(timestamp_near((datetime.datetime.now() - datetime.timedelta(days=days))))): + SGDatetime.timestamp_near(td=datetime.timedelta(days=days)): return True return False From 48d4b505a37f57bd52e8583ae29d16bfd692c3cb Mon Sep 17 00:00:00 2001 From: JackDandy Date: Thu, 9 Mar 2023 13:51:48 +0000 Subject: [PATCH 15/21] Change re-add deprecated endpoint used by external scripts. --- CHANGES.md | 1 + sickgear/webserveInit.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index a784afc0..808c1466 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,6 +15,7 @@ * Add logging around the restart/shutdown event * Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) * Refactor `timestamp_near` to `SGDatetime.timestamp_near` +* Change re-add deprecated endpoint used by external scripts ### 3.27.12 (2023-03-08 23:30:00 UTC) diff --git a/sickgear/webserveInit.py b/sickgear/webserveInit.py index 8a46f1d4..98e32d16 100644 --- a/sickgear/webserveInit.py +++ b/sickgear/webserveInit.py @@ -214,6 +214,9 @@ class WebServer(threading.Thread): (r'%s/api/builder(/?)(.*)' % self.options['web_root'], webserve.ApiBuilder), (r'%s/api(/?.*)' % self.options['web_root'], webapi.Api), # ---------------------------------------------------------------------------------------------------------- + # legacy deprecated Aug 2019 - NEVER remove as used in external scripts + (r'%s/home/postprocess(/?.*)' % self.options['web_root'], webserve.HomeProcessMedia), + (r'%s(/?update_watched_state_kodi/?)' % self.options['web_root'], webserve.NoXSRFHandler), # regular catchall routes - keep here at the bottom (r'%s/home(/?.*)' % self.options['web_root'], webserve.Home), (r'%s/manage/(/?.*)' % self.options['web_root'], webserve.Manage), From 68b8cbcddea00cdf34b0c2ac84ca9cafe3fe0eb9 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Fri, 10 Mar 2023 01:49:29 +0000 Subject: [PATCH 16/21] Change deprecate processEpisode used by nzbToMedia to advise how to configure API instead. --- CHANGES.md | 1 + gui/slick/interfaces/default/inc_top.tmpl | 7 +++++++ sickgear/webserve.py | 15 ++++++--------- sickgear/webserveInit.py | 1 - 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 808c1466..de75eaeb 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,6 +8,7 @@ * Change requirements for pure py3 * Change codebase cleanups * Change improve perf by using generators with `any` +* Change deprecate processEpisode used by nzbToMedia to advise how to configure API instead [develop changelog] diff --git a/gui/slick/interfaces/default/inc_top.tmpl b/gui/slick/interfaces/default/inc_top.tmpl index 11f1e410..61a92afd 100644 --- a/gui/slick/interfaces/default/inc_top.tmpl +++ b/gui/slick/interfaces/default/inc_top.tmpl @@ -395,6 +395,13 @@
 #end if ## +#if $sickgear.MEMCACHE.get('DEPRECATE_PP_LEGACY')
+Mar 2020: The `nzbToMedia` script began using the secure API to process media
+Mar 2023: To remove this red box, please follow this guidance
    +#end if +## #set $items = [] #try #set void = $items.append($topmenu) diff --git a/sickgear/webserve.py b/sickgear/webserve.py index 82cf7697..dbf0e7cc 100644 --- a/sickgear/webserve.py +++ b/sickgear/webserve.py @@ -3932,16 +3932,13 @@ class HomeProcessMedia(Home): return self._generic_message('Postprocessing results', f'
    {result}
    ') # noinspection PyPep8Naming - def processEpisode(self, dir_name=None, nzb_name=None, process_type=None, **kwargs): - """ legacy function name, stubbed but can _not_ be removed as this - is potentially used in pp scripts located outside of SG path (need to verify this) + @staticmethod + def processEpisode(**kwargs): + """ legacy function name, stubbed and will be removed """ - kwargs['dir_name'] = dir_name or kwargs.pop('dir', None) - kwargs['nzb_name'] = nzb_name or kwargs.pop('nzbName', None) - kwargs['process_type'] = process_type or kwargs.pop('type', 'auto') - kwargs['pp_version'] = kwargs.pop('ppVersion', '0') - return self.process_files(**kwargs) - + logger.error('This endpoint is no longer to be used,' + ' nzbToMedia users please follow: https://github.com/SickGear/SickGear/wiki/FAQ-nzbToMedia') + sickgear.MEMCACHE['DEPRECATE_PP_LEGACY'] = True class AddShows(Home): diff --git a/sickgear/webserveInit.py b/sickgear/webserveInit.py index 98e32d16..c03d6258 100644 --- a/sickgear/webserveInit.py +++ b/sickgear/webserveInit.py @@ -216,7 +216,6 @@ class WebServer(threading.Thread): # ---------------------------------------------------------------------------------------------------------- # legacy deprecated Aug 2019 - NEVER remove as used in external scripts (r'%s/home/postprocess(/?.*)' % self.options['web_root'], webserve.HomeProcessMedia), - (r'%s(/?update_watched_state_kodi/?)' % self.options['web_root'], webserve.NoXSRFHandler), # regular catchall routes - keep here at the bottom (r'%s/home(/?.*)' % self.options['web_root'], webserve.Home), (r'%s/manage/(/?.*)' % self.options['web_root'], webserve.Manage), From e239b81a185bfef61c248ba454460825bf402d30 Mon Sep 17 00:00:00 2001 From: Prinz23 Date: Sat, 11 Mar 2023 11:48:27 +0000 Subject: [PATCH 17/21] Change optionally add disk free space in response to three Web API endpoints. Change increase API version number to 15. Add actually use mount points to get disk free space. Add optional "freespace" parameter to endpoints: sg.getrootdirs, sg.addrootdir, sg.deleterootdir Change update help of affected endpoints. Fix explicitly save rootdirs after adding or deleting via Web API. 
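A minimal sketch (not part of this patch) of how a client could exercise the new optional "freespace" parameter on sg.getrootdirs described above. Assumptions not taken from the patch: SickGear reachable at localhost:8081, API_KEY as a placeholder key, and the usual /api/<apikey>/?cmd=... URL form; the 'free_space' field is the one this commit adds to each root dir entry.

# Hedged usage sketch for the new freespace flag (assumed host, key and URL form)
import json
from urllib.request import urlopen

API_KEY = 'your-api-key'  # hypothetical placeholder, not from the patch
url = f'http://localhost:8081/api/{API_KEY}/?cmd=sg.getrootdirs&freespace=1'

with urlopen(url) as resp:
    payload = json.load(resp)

# with freespace=1 each root dir entry should also carry a 'free_space' value
for entry in payload.get('data', []):
    print(entry['location'], entry['default'], entry.get('free_space'))

The same parameter applies to sg.addrootdir and sg.deleterootdir per the endpoint help added in this commit.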
--- CHANGES.md | 6 ++ gui/slick/interfaces/default/apiBuilder.tmpl | 24 +++++-- sickgear/helpers.py | 33 +++++---- sickgear/webapi.py | 75 +++++++++++++------- 4 files changed, 93 insertions(+), 45 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index de75eaeb..b367eae5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,6 +9,12 @@ * Change codebase cleanups * Change improve perf by using generators with `any` * Change deprecate processEpisode used by nzbToMedia to advise how to configure API instead +* Change optionally add disk free space in response to three Web API endpoints +* Change increase API version number to 15 +* Add actually use mount points to get disk free space +* Add optional "freespace" parameter to endpoints: sg.getrootdirs, sg.addrootdir, sg.deleterootdir +* Change update help of affected endpoints +* Fix explicitly save rootdirs after adding or deleting via Web API [develop changelog] diff --git a/gui/slick/interfaces/default/apiBuilder.tmpl b/gui/slick/interfaces/default/apiBuilder.tmpl index acf1fab1..4b696fb4 100644 --- a/gui/slick/interfaces/default/apiBuilder.tmpl +++ b/gui/slick/interfaces/default/apiBuilder.tmpl @@ -70,11 +70,11 @@ addList("Command", "Help", "?cmd=help", "sg.functions-list", "","", "default"); addOption("sg.functions-list", "$k", "&subject=$k", "", "", "#echo ('sb', 'sg')['sg' in $k]#") #end for addList("Command", "SickBeard.AddRootDir", "?cmd=sb.addrootdir", "sb.addrootdir"); -addList("Command", "SickGear.AddRootDir", "?cmd=sg.addrootdir", "sb.addrootdir"); +addList("Command", "SickGear.AddRootDir", "?cmd=sg.addrootdir", "sg.addrootdir"); addOption("Command", "SickBeard.CheckScheduler", "?cmd=sb.checkscheduler"); addOption("Command", "SickGear.CheckScheduler", "?cmd=sg.checkscheduler"); addList("Command", "SickBeard.DeleteRootDir", "?cmd=sb.deleterootdir", "sb.deleterootdir"); -addList("Command", "SickGear.DeleteRootDir", "?cmd=sg.deleterootdir", "sb.deleterootdir"); +addList("Command", "SickGear.DeleteRootDir", "?cmd=sg.deleterootdir", "sg.deleterootdir"); addOption("Command", "SickBeard.ForceSearch", "?cmd=sb.forcesearch"); addList("Command", "SickGear.ForceSearch", "?cmd=sg.forcesearch", "sg.forcesearch"); addOption("Command", "SickGear.SearchQueue", "?cmd=sg.searchqueue"); @@ -88,7 +88,7 @@ addList("Command", "SickGear.GetIndexers", "?cmd=sg.getindexers", "listindexers" addList("Command", "SickGear.GetIndexerIcon", "?cmd=sg.getindexericon", "getindexericon"); addList("Command", "SickGear.GetNetworkIcon", "?cmd=sg.getnetworkicon", "getnetworkicon"); addOption("Command", "SickBeard.GetRootDirs", "?cmd=sb.getrootdirs"); -addOption("Command", "SickGear.GetRootDirs", "?cmd=sg.getrootdirs"); +addList("Command", "SickGear.GetRootDirs", "?cmd=sg.getrootdirs", "sg.addfreespace"); addList("Command", "SickBeard.PauseBacklog", "?cmd=sb.pausebacklog", "sb.pausebacklog"); addList("Command", "SickGear.PauseBacklog", "?cmd=sg.pausebacklog", "sb.pausebacklog"); addOption("Command", "SickBeard.Ping", "?cmd=sb.ping"); @@ -621,10 +621,26 @@ addOption("sb.addrootdir-opt", "Optional Param", "", 1); addOption("sb.addrootdir-opt", "Default", "&default=1"); addOption("sb.addrootdir-opt", "Not Default", "&default=0"); -addOption("sb.deleterootdir", "C:\\Temp", "&location=C:\\Temp", "", 1); +addList("sg.addrootdir", "C:\\Temp", "&location=C:\\Temp", "sg.addrootdir-opt"); +addList("sg.addrootdir", "/usr/bin", "&location=/usr/bin/", "sg.addrootdir-opt"); +addList("sg.addrootdir", "S:\\Invalid_Location", "&location=S:\\Invalid_Location", "sg.addrootdir-opt"); + 
+addList("sg.addrootdir-opt", "Optional Param", "", "sg.addfreespace"); +addList("sg.addrootdir-opt", "Default", "&default=1", "sg.addfreespace"); +addList("sg.addrootdir-opt", "Not Default", "&default=0", "sg.addfreespace"); + +addOption("sb.deleterootdir", "C:\\Temp", "&location=C:\\Temp", 1); addOption("sb.deleterootdir", "/usr/bin", "&location=/usr/bin/"); addOption("sb.deleterootdir", "S:\\Invalid_Location", "&location=S:\\Invalid_Location"); +addList("sg.deleterootdir", "C:\\Temp", "&location=C:\\Temp", "sg.addfreespace"); +addList("sg.deleterootdir", "/usr/bin", "&location=/usr/bin/", "sg.addfreespace"); +addList("sg.deleterootdir", "S:\\Invalid_Location", "&location=S:\\Invalid_Location", "sg.addfreespace"); + +addOption("sg.addfreespace", "Optional Param", "", 1) +addOption("sg.addfreespace", "incl Freespace", "&freespace=1") +addOption("sg.addfreespace", "excl Freespace", "&freespace=0") + #for $cur_show_obj in $sortedShowList: addList("show.pause", "$cur_show_obj.name", "&indexerid=$cur_show_obj.prodid", "show.pause-opt"); #end for diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 4c78e787..6d036afa 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -34,7 +34,7 @@ import sickgear from . import db, logger, notifiers from .common import cpu_presets, mediaExtensions, Overview, Quality, statusStrings, subtitleExtensions, \ ARCHIVED, DOWNLOADED, FAILED, IGNORED, SKIPPED, SNATCHED_ANY, SUBTITLED, UNAIRED, UNKNOWN, WANTED -from .sgdatetime import SGDatetime +from .sgdatetime import SGDatetime from lib.tvinfo_base.exceptions import * from exceptions_helper import ex, MultipleShowObjectsException @@ -1031,7 +1031,7 @@ def clear_cache(force=False): """ # clean out cache directory, remove everything > 12 hours old dirty = None - del_time = SGDatetime.timestamp_near(td=datetime.timedelta(hours=12)) + del_time = SGDatetime.timestamp_near(td=datetime.timedelta(hours=12)) direntry_args = dict(follow_symlinks=False) for direntry in scantree(sickgear.CACHE_DIR, ['images|rss|zoneinfo'], follow_symlinks=True): if direntry.is_file(**direntry_args) and (force or del_time > direntry.stat(**direntry_args).st_mtime): @@ -1342,7 +1342,7 @@ def delete_not_changed_in(paths, days=30, minutes=0): :param minutes: Purge files not modified in this number of minutes (default: 0 minutes) :return: tuple; number of files that qualify for deletion, number of qualifying files that failed to be deleted """ - del_time = SGDatetime.timestamp_near(td=datetime.timedelta(days=days, minutes=minutes)) + del_time = SGDatetime.timestamp_near(td=datetime.timedelta(days=days, minutes=minutes)) errors = 0 qualified = 0 for cur_path in (paths, [paths])[not isinstance(paths, list)]: @@ -1367,7 +1367,7 @@ def set_file_timestamp(filename, min_age=3, new_time=None): :param new_time: :type new_time: None or int """ - min_time = SGDatetime.timestamp_near(td=datetime.timedelta(days=min_age)) + min_time = SGDatetime.timestamp_near(td=datetime.timedelta(days=min_age)) try: if os.path.isfile(filename) and os.path.getmtime(filename) < min_time: os.utime(filename, new_time) @@ -1412,6 +1412,19 @@ def is_link(filepath): return os.path.islink(filepath) +def find_mount_point(path): + # type: (AnyStr) -> AnyStr + """ + returns the mount point for the given path + :param path: path to find the mount point + :return: mount point for path + """ + path = os.path.realpath(os.path.abspath(path)) + while not os.path.ismount(path): + path = os.path.dirname(path) + return path + + def df(): """ Return disk free space at known parent 
locations @@ -1424,17 +1437,9 @@ def df(): if sickgear.ROOT_DIRS and sickgear.DISPLAY_FREESPACE: targets = [] for path in sickgear.ROOT_DIRS.split('|')[1:]: - location_parts = os.path.splitdrive(path) - target = location_parts[0] - if 'win32' == sys.platform: - if not re.match('(?i)[a-z]:(?:\\\\)?$', target): - # simple drive letter not found, fallback to full path - target = path - min_output = False - elif sys.platform.startswith(('linux', 'darwin', 'sunos5')) or 'bsd' in sys.platform: - target = path - min_output = False + target = find_mount_point(path) if target and target not in targets: + min_output = False targets += [target] free = freespace(path) if None is not free: diff --git a/sickgear/webapi.py b/sickgear/webapi.py index f75c426a..48511e23 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -43,7 +43,7 @@ from . import classes, db, helpers, history, image_cache, logger, network_timezo from .common import ARCHIVED, DOWNLOADED, FAILED, IGNORED, SKIPPED, SNATCHED, SNATCHED_ANY, SNATCHED_BEST, \ SNATCHED_PROPER, UNAIRED, UNKNOWN, WANTED, Quality, qualityPresetStrings, statusStrings from .name_parser.parser import NameParser -from .helpers import starify +from .helpers import df, find_mount_point, starify from .indexers import indexer_api, indexer_config from .indexers.indexer_config import * from lib.tvinfo_base.exceptions import * @@ -150,7 +150,7 @@ else: class Api(webserve.BaseHandler): """ api class that returns json results """ - version = 14 # use an int since float-point is unpredictable + version = 15 # use an int since float-point is unpredictable def check_xsrf_cookie(self): pass @@ -801,38 +801,45 @@ def _getQualityMap(): return quality_map_inversed -def _getRootDirs(): - if "" == sickgear.ROOT_DIRS: - return {} +def _get_root_dirs(get_freespace=False): + # type: (bool) -> List[Dict] + """ + + :param get_freespace: include disk free space info in response + """ + dir_list = [] + if not sickgear.ROOT_DIRS: + return dir_list - rootDir = {} root_dirs = sickgear.ROOT_DIRS.split('|') - default_index = int(sickgear.ROOT_DIRS.split('|')[0]) - - rootDir["default_index"] = int(sickgear.ROOT_DIRS.split('|')[0]) - # remove default_index value from list (this fixes the offset) - root_dirs.pop(0) + default_index = int(root_dirs.pop(0)) if len(root_dirs) < default_index: - return {} + return dir_list # clean up the list - replace %xx escapes by their single-character equivalent root_dirs = [unquote_plus(x) for x in root_dirs] default_dir = root_dirs[default_index] - dir_list = [] - for root_dir in root_dirs: - valid = 1 + if root_dirs and get_freespace: + diskfree, _ = df() + + for cur_root_dir in root_dirs: try: - os.listdir(root_dir) + os.listdir(cur_root_dir) + valid = 1 except (BaseException, Exception): valid = 0 - default = 0 - if root_dir is default_dir: - default = 1 - dir_list.append({'valid': valid, 'location': root_dir, 'default': default}) + new_entry = {'valid': valid, 'location': cur_root_dir, 'default': int(cur_root_dir is default_dir)} + + if get_freespace: + # noinspection PyUnboundLocalVariable + new_entry.update({'free_space': next((space for disk, space in diskfree or [] + if disk == find_mount_point(cur_root_dir)), '')}) + + dir_list.append(new_entry) return dir_list @@ -1975,7 +1982,8 @@ class CMD_SickGearAddRootDir(ApiCall): _help = {"desc": "add a user configured parent directory", "requiredParameters": {"location": {"desc": "the full path to root (parent) directory"} }, - "optionalParameters": {"default": {"desc": "make the location passed the 
default root (parent) directory"} + "optionalParameters": {"default": {"desc": "make the location passed the default root (parent) directory"}, + "freespace": {"desc": "include free space of paths in response"} } } @@ -1984,6 +1992,7 @@ class CMD_SickGearAddRootDir(ApiCall): self.location, args = self.check_params(args, kwargs, "location", None, True, "string", []) # optional self.default, args = self.check_params(args, kwargs, "default", 0, False, "bool", []) + self.freespace, args = self.check_params(args, kwargs, "freespace", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, handler, args, kwargs) @@ -2026,7 +2035,9 @@ class CMD_SickGearAddRootDir(ApiCall): root_dirs_new = '|'.join([text_type(x) for x in root_dirs_new]) sickgear.ROOT_DIRS = root_dirs_new - return _responds(RESULT_SUCCESS, _getRootDirs(), msg="Root directories updated") + sickgear.save_config() + return _responds(RESULT_SUCCESS, _get_root_dirs(not self.sickbeard_call and self.freespace), + msg="Root directories updated") class CMD_SickBeardAddRootDir(CMD_SickGearAddRootDir): @@ -2084,20 +2095,24 @@ class CMD_SickBeardCheckScheduler(CMD_SickGearCheckScheduler): class CMD_SickGearDeleteRootDir(ApiCall): _help = {"desc": "delete a user configured parent directory", - "requiredParameters": {"location": {"desc": "the full path to root (parent) directory"}} + "requiredParameters": {"location": {"desc": "the full path to root (parent) directory"}}, + "optionalParameters": {"freespace": {"desc": "include free space of paths in response"} + } } def __init__(self, handler, args, kwargs): # required self.location, args = self.check_params(args, kwargs, "location", None, True, "string", []) # optional + self.freespace, args = self.check_params(args, kwargs, "freespace", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, handler, args, kwargs) def run(self): """ delete a user configured parent directory """ if sickgear.ROOT_DIRS == "": - return _responds(RESULT_FAILURE, _getRootDirs(), msg="No root directories detected") + return _responds(RESULT_FAILURE, _get_root_dirs(not self.sickbeard_call and self.freespace), + msg="No root directories detected") newIndex = 0 root_dirs_new = [] @@ -2124,8 +2139,10 @@ class CMD_SickGearDeleteRootDir(ApiCall): root_dirs_new = "|".join([text_type(x) for x in root_dirs_new]) sickgear.ROOT_DIRS = root_dirs_new + sickgear.save_config() # what if the root dir was not found? 
- return _responds(RESULT_SUCCESS, _getRootDirs(), msg="Root directory deleted") + return _responds(RESULT_SUCCESS, _get_root_dirs(not self.sickbeard_call and self.freespace), + msg="Root directory deleted") class CMD_SickBeardDeleteRootDir(CMD_SickGearDeleteRootDir): @@ -2374,18 +2391,22 @@ class CMD_SickGearGetqualityStrings(ApiCall): class CMD_SickGearGetRootDirs(ApiCall): - _help = {"desc": "get list of user configured parent directories"} + _help = {"desc": "get list of user configured parent directories", + "optionalParameters": {"freespace": {"desc": "include free space of paths in response"} + } + } def __init__(self, handler, args, kwargs): # required # optional + self.freespace, args = self.check_params(args, kwargs, "freespace", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, handler, args, kwargs) def run(self): """ get list of user configured parent directories """ - return _responds(RESULT_SUCCESS, _getRootDirs()) + return _responds(RESULT_SUCCESS, _get_root_dirs(not self.sickbeard_call and self.freespace)) class CMD_SickBeardGetRootDirs(CMD_SickGearGetRootDirs): From 344fb19575cddbab0f9d5c83dac025e20a474f4b Mon Sep 17 00:00:00 2001 From: Prinz23 Date: Mon, 13 Mar 2023 01:51:01 +0000 Subject: [PATCH 18/21] Fix unavailable paths. Change catch potential exception because of unmounting of path during finding mount point. Change add disabled message to webapi for freespace --- sickgear/helpers.py | 20 ++++++++++++++++---- sickgear/webapi.py | 11 +++++++---- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 6d036afa..2197c617 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -1419,18 +1419,28 @@ def find_mount_point(path): :param path: path to find the mount point :return: mount point for path """ + if not os.path.exists(path): + return path + org_path = path path = os.path.realpath(os.path.abspath(path)) - while not os.path.ismount(path): - path = os.path.dirname(path) + try: + while not os.path.ismount(path): + new_path = os.path.dirname(path) + if new_path == path: + # in case no mount point was found return original path + return org_path + path = new_path + except (BaseException, Exception): + return org_path return path def df(): + # type: (...) 
-> Tuple[List[Tuple[AnyStr, AnyStr]], bool] """ Return disk free space at known parent locations :return: string path, string value that is formatted size - :rtype: Tuple[List[Tuple[AnyStr, AnyStr]], bool] """ result = [] min_output = True @@ -1441,9 +1451,11 @@ def df(): if target and target not in targets: min_output = False targets += [target] - free = freespace(path) + free = freespace(target) if None is not free: result += [(target, sizeof_fmt(free).replace(' ', ''))] + else: + result += [(target, 'unavailable')] return result, min_output diff --git a/sickgear/webapi.py b/sickgear/webapi.py index 48511e23..e6e637ab 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -822,7 +822,7 @@ def _get_root_dirs(get_freespace=False): default_dir = root_dirs[default_index] - if root_dirs and get_freespace: + if root_dirs and get_freespace and sickgear.DISPLAY_FREESPACE: diskfree, _ = df() for cur_root_dir in root_dirs: @@ -835,9 +835,12 @@ def _get_root_dirs(get_freespace=False): new_entry = {'valid': valid, 'location': cur_root_dir, 'default': int(cur_root_dir is default_dir)} if get_freespace: - # noinspection PyUnboundLocalVariable - new_entry.update({'free_space': next((space for disk, space in diskfree or [] - if disk == find_mount_point(cur_root_dir)), '')}) + if sickgear.DISPLAY_FREESPACE: + # noinspection PyUnboundLocalVariable + new_entry.update({'free_space': next((space for disk, space in diskfree or [] + if disk == find_mount_point(cur_root_dir)), '')}) + else: + new_entry.update({'free_space': 'Required setting "Display freespace" is not enabled'}) dir_list.append(new_entry) From 1e161f376da8980c12d7e0d3cf86b27299df9a8b Mon Sep 17 00:00:00 2001 From: JackDandy Date: Tue, 14 Mar 2023 00:30:08 +0000 Subject: [PATCH 19/21] Fix UI by marking resource as unavailable if cannot determine its free space. Change replace copyFile placeholder code with copy_file override. Change replace moveFile placeholder code with move_file override. Change replace tryInt placeholder code with try_int override. Change refactor disk free space logic. Change restore min_output logic. 
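For reference, a minimal standalone sketch of the disk free space approach described above: map each root dir to its mount point, de-duplicate, then report either a formatted size or "unavailable". It uses only the standard library; summarize_free_space and the sample paths are illustrative, not SickGear's own API.

import os
import shutil


def find_mount_point(path):
    """Walk up from path until a filesystem mount point (or the top) is reached."""
    path = os.path.realpath(os.path.abspath(path))
    while not os.path.ismount(path):
        parent = os.path.dirname(path)
        if parent == path:  # reached the top without finding a mount point
            break
        path = parent
    return path


def summarize_free_space(root_dirs):
    """Return (mount_point, human-readable free space) for each unique mount."""
    result, seen = [], set()
    for mount in map(find_mount_point, root_dirs):
        if mount in seen:
            continue
        seen.add(mount)
        try:
            free = shutil.disk_usage(mount).free
        except OSError:
            # e.g. a network share that was unmounted after the path was configured
            result.append((mount, 'unavailable'))
            continue
        result.append((mount, '%.1fGB' % (free / 1024.0 ** 3)))
    return result


if __name__ == '__main__':
    print(summarize_free_space(['/home', '/tmp']))
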
--- CHANGES.md | 1 + sickgear/helpers.py | 111 ++++++++++++++++++-------------------------- sickgear/webapi.py | 10 ++-- 3 files changed, 50 insertions(+), 72 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b367eae5..489e4b3e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,6 +23,7 @@ * Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) * Refactor `timestamp_near` to `SGDatetime.timestamp_near` * Change re-add deprecated endpoint used by external scripts +* Fix UI by marking resource as unavailable if cannot determine its free space ### 3.27.12 (2023-03-08 23:30:00 UTC) diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 2197c617..117ec034 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -57,6 +57,11 @@ from sg_helpers import chmod_as_parent, clean_data, copy_file, download_file, fi get_url, indent_xml, make_path, maybe_plural, md5_for_text, move_file, proxy_setting, remove_file, \ remove_file_perm, replace_extension, sanitize_filename, scantree, touch_file, try_int, try_ord, write_file +# deprecated item, remove in 2020, kept here as rollback uses it +copyFile = copy_file +moveFile = move_file +tryInt = try_int # one legacy custom provider is keeping this signature here + # noinspection PyUnreachableCode if False: # noinspection PyUnresolvedReferences @@ -319,19 +324,20 @@ def search_infosrc_for_show_id(reg_show_name, tvid=None, prodid=None, ui=None): return None, None, None -def sizeof_fmt(num): +def sizeof_fmt(number, digits=1, sep=' '): + # type: (int, int, AnyStr) -> AnyStr """ - format given bytes to human readable string + format given bytes to human-readable text - :param num: number - :type num: int or long - :return: human readable formatted string - :rtype: AnyStr + :param number: value to convert + :param digits: number of digits after decimal point + :param sep: seperater of value and dimension + :return: human-readable formatted text """ - for x in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']: - if 1024.0 > num: - return "%3.1f %s" % (num, x) - num /= 1024.0 + for cur_dimension in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']: + if 1024.0 > number: + return f'{number:3.{digits}f}{sep}{cur_dimension}' + number /= 1024.0 def list_media_files(path): @@ -352,30 +358,6 @@ def list_media_files(path): return result -def copyFile(src_file, dest_file): - """ deprecated_item, remove in 2020, kept here as rollback uses it - :param src_file: source file - :type src_file: AnyStr - :param dest_file: destination file - :type dest_file: AnyStr - :return: nothing - :rtype: None - """ - return copy_file(src_file, dest_file) - - -def moveFile(src_file, dest_file): - """ deprecated_item, remove in 2020, kept here as rollback uses it - :param src_file: source file - :type src_file: AnyStr - :param dest_file: destination file - :type dest_file: AnyStr - :return: nothing - :rtype: None - """ - return move_file(src_file, dest_file) - - def link(src_file, dest_file): """ @@ -759,12 +741,6 @@ def restore_versioned_file(backup_file, version): return True -# one legacy custom provider is keeping this signature here, -# a monkey patch could fix that so that this can be removed -def tryInt(s, s_default=0): - return try_int(s, s_default) - - # try to convert to float, return default on failure def try_float(s, s_default=0.0): try: @@ -1416,23 +1392,23 @@ def find_mount_point(path): # type: (AnyStr) -> AnyStr """ returns the mount point for the given path - :param path: path to find the mount point - :return: mount point for path + + :param 
path: to find the mount path + :return: mount point for path or path if no mount """ - if not os.path.exists(path): - return path - org_path = path - path = os.path.realpath(os.path.abspath(path)) - try: - while not os.path.ismount(path): - new_path = os.path.dirname(path) - if new_path == path: - # in case no mount point was found return original path - return org_path - path = new_path - except (BaseException, Exception): - return org_path - return path + result = path + if os.path.exists(path): + result = os.path.realpath(os.path.abspath(path)) + try: + while not os.path.ismount(result): + new_path = os.path.dirname(result) + if new_path == result: + # return input path if mount point not found + return path + result = new_path + except (BaseException, Exception): + return path + return result def df(): @@ -1443,19 +1419,22 @@ def df(): :return: string path, string value that is formatted size """ result = [] - min_output = True + min_output = True # flag ui to output minimal (e.g. vol: size, vol: size) if sickgear.ROOT_DIRS and sickgear.DISPLAY_FREESPACE: targets = [] - for path in sickgear.ROOT_DIRS.split('|')[1:]: - target = find_mount_point(path) - if target and target not in targets: + for cur_target in filter(lambda _t: _t and _t not in targets, + map(find_mount_point, sickgear.ROOT_DIRS.split('|')[1:])): + targets += [cur_target] + free = freespace(cur_target) + if 'win32' == sys.platform and None is not free: + cur_target = os.path.splitdrive(cur_target)[0] + if any(['win32' == sys.platform and not re.match('(?i)[a-z]:(\\\\)?$', cur_target), + # Windows, if a simple drive letter isn't found, fallback to full path. On Linux, full path is used + # trigger ui to output long paths instead of minimal volume letters layout + sys.platform.startswith(('linux', 'darwin', 'sunos5')), 'bsd' in sys.platform]): min_output = False - targets += [target] - free = freespace(target) - if None is not free: - result += [(target, sizeof_fmt(free).replace(' ', ''))] - else: - result += [(target, 'unavailable')] + result += [(cur_target, 'unavailable' if None is free else sizeof_fmt(free, sep=''))] + return result, min_output diff --git a/sickgear/webapi.py b/sickgear/webapi.py index e6e637ab..908a32eb 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -835,12 +835,10 @@ def _get_root_dirs(get_freespace=False): new_entry = {'valid': valid, 'location': cur_root_dir, 'default': int(cur_root_dir is default_dir)} if get_freespace: - if sickgear.DISPLAY_FREESPACE: - # noinspection PyUnboundLocalVariable - new_entry.update({'free_space': next((space for disk, space in diskfree or [] - if disk == find_mount_point(cur_root_dir)), '')}) - else: - new_entry.update({'free_space': 'Required setting "Display freespace" is not enabled'}) + # noinspection PyUnboundLocalVariable + new_entry['free_space'] = 'Required setting "Display freespace" is not enabled' \ + if not sickgear.DISPLAY_FREESPACE \ + else next((_space for _disk, _space in diskfree or [] if _disk == find_mount_point(cur_root_dir)), '') dir_list.append(new_entry) From ffff95fe34f92044c12c3099d37ea1976144741d Mon Sep 17 00:00:00 2001 From: JackDandy Date: Sun, 2 Apr 2023 23:18:05 +0100 Subject: [PATCH 20/21] Change add Rarbg UHD search category. 
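For reference, an illustrative sketch of how the semicolon-separated category list is combined with the other default parameters into a torrentapi-style query string; the base URL, app id and token are placeholders, and reading 18/41/49 as SD/HD/UHD TV episode categories is an assumption rather than something stated in the provider code.

# Illustrative only; values marked as placeholders are not taken from the provider.
base = 'https://torrentapi.org/pubapi_v2.php?app_id=example&mode=search'  # placeholder base/app id
defaults = '&format=json_extended&category=18;41;49&limit=100&sort=last&ranked={r}&token={t}'
query = base + defaults.format(r=0, t='PLACEHOLDER_TOKEN') + '&search_string=' + 'Example+Show+S01E01'
print(query)
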
--- CHANGES.md | 1 + sickgear/providers/rarbg.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 489e4b3e..e3702c5f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,6 +15,7 @@ * Add optional "freespace" parameter to endpoints: sg.getrootdirs, sg.addrootdir, sg.deleterootdir * Change update help of affected endpoints * Fix explicitly save rootdirs after adding or deleting via Web API +* Change add Rarbg UHD search category [develop changelog] diff --git a/sickgear/providers/rarbg.py b/sickgear/providers/rarbg.py index 8889bfc3..b5b735b5 100644 --- a/sickgear/providers/rarbg.py +++ b/sickgear/providers/rarbg.py @@ -41,7 +41,7 @@ class RarbgProvider(generic.TorrentProvider): 'api_list': self.url_api + 'mode=list', 'api_search': self.url_api + 'mode=search'} - self.params = {'defaults': '&format=json_extended&category=18;41&limit=100&sort=last&ranked={r}&token={t}', + self.params = {'defaults': '&format=json_extended&category=18;41;49&limit=100&sort=last&ranked={r}&token={t}', 'param_iid': '&search_imdb=%(sid)s', 'param_tid': '&search_tvdb=%(sid)s', 'param_str': '&search_string=%(str)s', From ae29264c24f1c552b9adaca467bc752f61e9cbb1 Mon Sep 17 00:00:00 2001 From: JackDandy Date: Wed, 12 Apr 2023 13:07:29 +0100 Subject: [PATCH 21/21] Prepare release, bump date. --- CHANGES.md | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b0fbd244..d9ba3777 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -### 3.28.0 (2023-xx-xx xx:xx:00 UTC) +### 3.28.0 (2023-04-12 13:05:00 UTC) * Update html5lib 1.1 (f87487a) to 1.2-dev (3e500bb) * Update package resource API 63.2.0 (3ae44cd) to 67.5.1 (f51eccd) @@ -18,15 +18,6 @@ * Change add Rarbg UHD search category -[develop changelog] - -* Add logging around the restart/shutdown event -* Update package resource API 63.2.0 (3ae44cd) to 67.3.2 (b9bf2ec) -* Refactor `timestamp_near` to `SGDatetime.timestamp_near` -* Change re-add deprecated endpoint used by external scripts -* Fix UI by marking resource as unavailable if cannot determine its free space - - ### 3.27.13 (2023-04-12 10:15:00 UTC) * Change fix show id log output
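
For reference, a hedged usage sketch for the new optional "freespace" parameter on the root dir endpoints; the host, port and API key are placeholders, and the fields shown (valid, location, default, free_space) approximate the data added by this patch series rather than quoting a captured response.

import json
from urllib.request import urlopen

# placeholder host/port/API key; adjust to your own SickGear instance
url = 'http://localhost:8081/api/YOUR_API_KEY/?cmd=sg.getrootdirs&freespace=1'
with urlopen(url) as resp:
    payload = json.load(resp)

for entry in payload.get('data', []):
    # expected shape (approximate): {'valid': 1, 'location': '/mnt/media', 'default': 0, 'free_space': '1.2TB'}
    print(entry['location'], entry.get('free_space', 'n/a'))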