From ae4173e8c0498c347571ecbd365d04e4140bbcf9 Mon Sep 17 00:00:00 2001
From: JackDandy
Date: Wed, 8 Mar 2023 13:44:20 +0000
Subject: [PATCH] Change py2 unicode into f-strings or simple strings where
 appropriate. Change use specific logger functions for debug, warning, error.
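
Both changes follow one mechanical pattern throughout. As an illustrative
sketch only (this snippet is not a hunk from the diff; it uses the stdlib
logging module as a stand-in, whereas this repo's own logger takes the level
as a trailing argument, e.g. logger.log(msg, logger.ERROR), and exposes
debug/warning/error helpers as used below):

    import logging

    logger = logging.getLogger('example')
    name, err = 'cache', 'permission denied'

    # before: py2-era u'' literal with %-formatting, level passed separately
    logger.log(logging.ERROR, u'Failed creating %s : %s' % (name, err))

    # after: plain f-string passed to the level-specific helper
    logger.error(f'Failed creating {name} : {err}')

Literals with no interpolation simply drop the u'' prefix.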
---
 _cleaner.py                            |   6 +-
 lib/api_trakt/trakt.py                 |  22 +-
 lib/api_tvdb/tvdb_api.py               |  26 +-
 lib/api_tvdb/tvdb_ui.py                |   4 +-
 lib/certgen.py                         |  10 +-
 lib/plex/plex.py                       |   2 +-
 lib/sg_helpers.py                      |  53 ++--
 sickgear.py                            |  75 +++--
 sickgear/__init__.py                   |  24 +-
 sickgear/anime.py                      |  30 +-
 sickgear/auto_post_processer.py        |   7 +-
 sickgear/browser.py                    |   2 +-
 sickgear/clients/deluge.py             |  15 +-
 sickgear/clients/download_station.py   |  12 +-
 sickgear/clients/generic.py            |  47 ++-
 sickgear/clients/qbittorrent.py        |  16 +-
 sickgear/clients/rtorrent.py           |   6 +-
 sickgear/clients/transmission.py       |   4 +-
 sickgear/common.py                     |  10 +-
 sickgear/config.py                     |  58 ++--
 sickgear/databases/mainDB.py           | 206 +++++++------
 sickgear/db.py                         |  58 ++--
 sickgear/failedProcessor.py            |  25 +-
 sickgear/failed_history.py             |  20 +-
 sickgear/generic_queue.py              |  14 +-
 sickgear/helpers.py                    | 104 +++----
 sickgear/image_cache.py                |  54 ++--
 sickgear/indexermapper.py              |   2 +-
 sickgear/logger.py                     |   2 +-
 sickgear/metadata/generic.py           | 133 ++++-----
 sickgear/metadata/helpers.py           |   6 +-
 sickgear/metadata/kodi.py              |  28 +-
 sickgear/metadata/mede8er.py           |  20 +-
 sickgear/metadata/mediabrowser.py      |  25 +-
 sickgear/metadata/tivo.py              |  19 +-
 sickgear/metadata/wdtv.py              |   8 +-
 sickgear/metadata/xbmc_12plus.py       |  22 +-
 sickgear/name_parser/parser.py         |  24 +-
 sickgear/naming.py                     |  26 +-
 sickgear/network_timezones.py          |  29 +-
 sickgear/notifiers/boxcar2.py          |   4 +-
 sickgear/notifiers/emailnotify.py      |  18 +-
 sickgear/notifiers/emby.py             |  26 +-
 sickgear/notifiers/generic.py          |   8 +-
 sickgear/notifiers/growl.py            |   2 +-
 sickgear/notifiers/kodi.py             |  69 +++--
 sickgear/notifiers/libnotify.py        |  14 +-
 sickgear/notifiers/nmj.py              |  36 +--
 sickgear/notifiers/nmjv2.py            |  22 +-
 sickgear/notifiers/plex.py             |  32 +-
 sickgear/notifiers/prowl.py            |   2 +-
 sickgear/notifiers/pushalot.py         |   4 +-
 sickgear/notifiers/pushbullet.py       |   2 +-
 sickgear/notifiers/pytivo.py           |  10 +-
 sickgear/notifiers/synoindex.py        |   6 +-
 sickgear/notifiers/synologynotifier.py |   6 +-
 sickgear/notifiers/telegram.py         |   4 +-
 sickgear/notifiers/xbmc.py             |  94 +++---
 sickgear/nzbSplitter.py                |  32 +-
 sickgear/nzbget.py                     |  22 +-
 sickgear/postProcessor.py              | 234 +++++++--------
 sickgear/processTV.py                  | 131 ++++-----
 sickgear/properFinder.py               |  43 ++-
 sickgear/providers/__init__.py         |   5 +-
 sickgear/providers/alpharatio.py       |   2 +-
 sickgear/providers/bithdtv.py          |   4 +-
 sickgear/providers/blutopia.py         |   6 +-
 sickgear/providers/btn.py              |  13 +-
 sickgear/providers/eztv.py             |   2 +-
 sickgear/providers/fano.py             |   2 +-
 sickgear/providers/filelist.py         |   2 +-
 sickgear/providers/filesharingtalk.py  |   4 +-
 sickgear/providers/funfile.py          |   2 +-
 sickgear/providers/generic.py          | 114 ++++----
 sickgear/providers/hdbits.py           |   6 +-
 sickgear/providers/hdspace.py          |   2 +-
 sickgear/providers/hdtorrents.py       |   2 +-
 sickgear/providers/iptorrents.py       |   4 +-
 sickgear/providers/limetorrents.py     |   4 +-
 sickgear/providers/magnetdl.py         |   2 +-
 sickgear/providers/morethan.py         |   2 +-
 sickgear/providers/ncore.py            |   2 +-
 sickgear/providers/nebulance.py        |   2 +-
 sickgear/providers/newznab.py          |  24 +-
 sickgear/providers/nyaa.py             |   2 +-
 sickgear/providers/omgwtfnzbs.py       |  11 +-
 sickgear/providers/pretome.py          |   2 +-
 sickgear/providers/privatehd.py        |   6 +-
 sickgear/providers/ptf.py              |   4 +-
 sickgear/providers/rarbg.py            |   2 +-
 sickgear/providers/revtt.py            |   2 +-
 sickgear/providers/rsstorrent.py       |   2 +-
 sickgear/providers/scenehd.py          |   4 +-
 sickgear/providers/scenetime.py        |   4 +-
 sickgear/providers/shazbat.py          |   2 +-
 sickgear/providers/showrss.py          |   2 +-
 sickgear/providers/snowfl.py           |   2 +-
 sickgear/providers/speedapp.py         |   2 +-
 sickgear/providers/speedcd.py          |   4 +-
 sickgear/providers/thepiratebay.py     |   6 +-
 sickgear/providers/torlock.py          |   2 +-
 sickgear/providers/torrentday.py       |   2 +-
 sickgear/providers/torrenting.py       |   4 +-
 sickgear/providers/torrentleech.py     |   2 +-
 sickgear/providers/tvchaosuk.py        |   2 +-
 sickgear/providers/xspeeds.py          |   4 +-
 sickgear/rssfeeds.py                   |   6 +-
 sickgear/sab.py                        |  18 +-
 sickgear/scene_exceptions.py           |  58 ++--
 sickgear/scene_numbering.py            |  25 +-
 sickgear/scheduler.py                  |   8 +-
 sickgear/search.py                     | 148 +++++-----
 sickgear/search_backlog.py             |  24 +-
 sickgear/search_queue.py               |  54 ++--
 sickgear/sgdatetime.py                 |   2 +-
 sickgear/show_name_helpers.py          |  24 +-
 sickgear/show_queue.py                 |  99 +++----
 sickgear/show_updater.py               |  56 ++--
 sickgear/subtitles.py                  |  20 +-
 sickgear/tv.py                         | 386 ++++++++++++-------------
 sickgear/tv_base.py                    |   4 +-
 sickgear/tvcache.py                    |  19 +-
 sickgear/version_checker.py            |  87 +++---
 sickgear/watchedstate_queue.py         |   2 +-
 sickgear/webapi.py                     |  42 ++-
 sickgear/webserve.py                   | 228 +++++++-------
 sickgear/webserveInit.py               |  11 +-
 tests/migration_tests.py               |  20 +-
 tests/name_parser_tests.py             |  10 +-
 tests/newznab_tests.py                 |  12 +-
 tests/scene_helpers_tests.py           |   4 +-
 tests/test_lib.py                      |   4 +-
 tests/xem_tests.py                     |   4 +-
 133 files changed, 1799 insertions(+), 1930 deletions(-)

diff --git a/_cleaner.py b/_cleaner.py
index 8c6eac00..60b21232 100644
--- a/_cleaner.py
+++ b/_cleaner.py
@@ -125,7 +125,7 @@ for cleaned_path, test_path, dir_list in cleanups:
             pass
         with io.open(cleaned_file, 'w+', encoding='utf-8') as fp:
-            fp.write(u'This file exists to prevent a rerun delete of *.pyc, *.pyo files')
+            fp.write('This file exists to prevent a rerun delete of *.pyc, *.pyo files')
             fp.flush()
             os.fsync(fp.fileno())
@@ -166,10 +166,10 @@ if not os.path.isfile(cleaned_file) or os.path.exists(test):
         swap_name = cleaned_file
         cleaned_file = danger_output
         danger_output = swap_name
-        msg = u'Failed (permissions?) to delete file(s). You must manually delete:\r\n%s' % '\r\n'.join(bad_files)
+        msg = 'Failed (permissions?) to delete file(s). You must manually delete:\r\n%s' % '\r\n'.join(bad_files)
        print(msg)
    else:
-        msg = u'This file exists to prevent a rerun delete of dead lib/html5lib files'
+        msg = 'This file exists to prevent a rerun delete of dead lib/html5lib files'
        with io.open(cleaned_file, 'w+', encoding='utf-8') as fp:
            fp.write(msg)

diff --git a/lib/api_trakt/trakt.py b/lib/api_trakt/trakt.py
index 663d944f..fed70438 100644
--- a/lib/api_trakt/trakt.py
+++ b/lib/api_trakt/trakt.py
@@ -277,7 +277,7 @@ class TraktAPI(object):
                 code = getattr(e.response, 'status_code', None)
                 if not code:
                     if 'timed out' in ex(e):
-                        log.warning(u'Timeout connecting to Trakt')
+                        log.warning('Timeout connecting to Trakt')
                         if count >= self.max_retrys:
                             raise TraktTimeout()
                         return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry,
@@ -285,12 +285,12 @@ class TraktAPI(object):
                     # This is pretty much a fatal error if there is no status_code
                     # It means there basically was no response at all
                     else:
-                        log.warning(u'Could not connect to Trakt. Error: %s' % ex(e))
+                        log.warning('Could not connect to Trakt. Error: %s' % ex(e))
                         raise TraktException('Could not connect to Trakt. Error: %s' % ex(e))

                elif 502 == code:
                    # Retry the request, Cloudflare had a proxying issue
-                    log.warning(u'Retrying Trakt api request: %s' % path)
+                    log.warning(f'Retrying Trakt api request: {path}')
                    if count >= self.max_retrys:
                        raise TraktCloudFlareException()
                    return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry,
@@ -303,7 +303,7 @@ class TraktAPI(object):
                        return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry,
                                                  send_oauth=send_oauth, method=method)

-                    log.warning(u'Unauthorized. Please check your Trakt settings')
+                    log.warning('Unauthorized. Please check your Trakt settings')
                    sickgear.TRAKT_ACCOUNTS[send_oauth].auth_failure()
                    raise TraktAuthException()
@@ -318,18 +318,18 @@ class TraktAPI(object):
                        raise TraktAuthException()
                elif code in (500, 501, 503, 504, 520, 521, 522):
                    if count >= self.max_retrys:
-                        log.warning(u'Trakt may have some issues and it\'s unavailable. Code: %s' % code)
+                        log.warning(f'Trakt may have some issues and it\'s unavailable. Code: {code}')
                        raise TraktServerError(error_code=code)
                    # http://docs.trakt.apiary.io/#introduction/status-codes
-                    log.warning(u'Trakt may have some issues and it\'s unavailable. Trying again')
+                    log.warning('Trakt may have some issues and it\'s unavailable. Trying again')
                    return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry,
                                              send_oauth=send_oauth, method=method)
                elif 404 == code:
-                    log.warning(u'Trakt error (404) the resource does not exist: %s%s' % (url, path))
+                    log.warning(f'Trakt error (404) the resource does not exist: {url}{path}')
                    raise TraktMethodNotExisting('Trakt error (404) the resource does not exist: %s%s' % (url, path))
                elif 429 == code:
                    if count >= self.max_retrys:
-                        log.warning(u'Trakt replied with Rate-Limiting, maximum retries exceeded.')
+                        log.warning('Trakt replied with Rate-Limiting, maximum retries exceeded.')
                        raise TraktServerError(error_code=code)
                    r_headers = getattr(e.response, 'headers', None)
                    if None is not r_headers:
@@ -356,14 +356,14 @@ class TraktAPI(object):
                            'revoked, does not match the redirection URI used in the authorization request,'
                            ' or was issued to another client.')
                else:
-                    log.error(u'Could not connect to Trakt. Code error: {0}'.format(code))
+                    log.error('Could not connect to Trakt. Code error: {0}'.format(code))
                    raise TraktException('Could not connect to Trakt. Code error: %s' % code)
        except ConnectionSkipException as e:
            log.warning('Connection is skipped')
            raise e
        except ValueError as e:
-            log.error(u'Value Error: %s' % ex(e))
-            raise TraktValueError(u'Value Error: %s' % ex(e))
+            log.error(f'Value Error: {ex(e)}')
+            raise TraktValueError(f'Value Error: {ex(e)}')
        except (BaseException, Exception) as e:
            log.error('Exception: %s' % ex(e))
            raise TraktException('Could not connect to Trakt. Code error: %s' % ex(e))

diff --git a/lib/api_tvdb/tvdb_api.py b/lib/api_tvdb/tvdb_api.py
index dc679ad5..c254d9a0 100644
--- a/lib/api_tvdb/tvdb_api.py
+++ b/lib/api_tvdb/tvdb_api.py
@@ -138,7 +138,7 @@ class Tvdb(TVInfoBase):
     """Create easy-to-use interface to name of season/episode name
     >> t = Tvdb()
     >> t['Scrubs'][1][24]['episodename']
-    u'My Last Day'
+    'My Last Day'
     """
     map_languages = {}
     reverse_map_languages = {v: k for k, v in iteritems(map_languages)}
@@ -201,7 +201,7 @@ class Tvdb(TVInfoBase):

             >> t = Tvdb(actors=True)
             >> t['scrubs']['actors'][0]['name']
-            u'Zach Braff'
+            'Zach Braff'

             custom_ui (tvdb_ui.BaseUI subclass):
                 A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
@@ -580,7 +580,7 @@ class Tvdb(TVInfoBase):
                         data_list.append(cr)
                 resp['data'] = data_list
             return resp
-        return dict([(u'data', (None, resp)[isinstance(resp, string_types)])])
+        return dict([('data', (None, resp)[isinstance(resp, string_types)])])

     def _getetsrc(self, url, params=None, language=None, parse_json=False):
         """Loads a URL using caching
@@ -1015,14 +1015,14 @@ class Tvdb(TVInfoBase):
                     url_image = self._make_image(self.config['url_artworks'], image_data['data'][0]['filename'])
                     url_thumb = self._make_image(self.config['url_artworks'], image_data['data'][0]['thumbnail'])
                     self._set_show_data(sid, image_type, url_image)
-                    self._set_show_data(sid, u'%s_thumb' % image_type, url_thumb)
+                    self._set_show_data(sid, f'{image_type}_thumb', url_thumb)
                     excluded_main_data = True  # artwork found so prevent fallback
                 self._parse_banners(sid, image_data['data'])
                 self.shows[sid].__dict__[loaded_name] = True

         # fallback image thumbnail for none excluded_main_data if artwork is not found
         if not excluded_main_data and show_data['data'].get(image_type):
-            self._set_show_data(sid, u'%s_thumb' % image_type,
+            self._set_show_data(sid, f'{image_type}_thumb',
                                 re.sub(r'\.jpg$', '_t.jpg', show_data['data'][image_type], flags=re.I))

     def _get_show_data(self,
@@ -1067,11 +1067,11 @@ class Tvdb(TVInfoBase):
             else:
                 show_data = {'data': {}}

-        for img_type, en_type, p_type in [(u'poster', 'posters_enabled', posters),
-                                          (u'banner', 'banners_enabled', banners),
-                                          (u'fanart', 'fanart_enabled', fanart),
-                                          (u'season', 'seasons_enabled', seasons),
-                                          (u'seasonwide', 'seasonwides_enabled', seasonwides)]:
+        for img_type, en_type, p_type in [('poster', 'posters_enabled', posters),
+                                          ('banner', 'banners_enabled', banners),
+                                          ('fanart', 'fanart_enabled', fanart),
+                                          ('season', 'seasons_enabled', seasons),
+                                          ('seasonwide', 'seasonwides_enabled', seasonwides)]:
             self._parse_images(sid, language, show_data, img_type, en_type, p_type)

         if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False):
@@ -1175,9 +1175,9 @@ class Tvdb(TVInfoBase):
                 else:
                     page += 1

-            ep_map_keys = {'absolutenumber': u'absolute_number', 'airedepisodenumber': u'episodenumber',
-                           'airedseason': u'seasonnumber', 'airedseasonid': u'seasonid',
-                           'dvdepisodenumber': u'dvd_episodenumber', 'dvdseason': u'dvd_season'}
+            ep_map_keys = {'absolutenumber': 'absolute_number', 'airedepisodenumber': 'episodenumber',
+                           'airedseason': 'seasonnumber', 'airedseasonid': 'seasonid',
+                           'dvdepisodenumber': 'dvd_episodenumber', 'dvdseason': 'dvd_season'}

             for cur_ep in episodes:
                 if self.config['dvdorder']:

diff --git a/lib/api_tvdb/tvdb_ui.py b/lib/api_tvdb/tvdb_ui.py
index fae43830..19765764 100644
--- a/lib/api_tvdb/tvdb_ui.py
+++ b/lib/api_tvdb/tvdb_ui.py
@@ -17,8 +17,8 @@ It must have a method "select_series", this is passed a list of dicts, each
 dict contains the the keys "name" (human readable show name), and "sid" (the
 shows ID as on thetvdb.com). For example:

-[{'name': u'Lost', 'sid': u'73739'},
- {'name': u'Lost Universe', 'sid': u'73181'}]
+[{'name': 'Lost', 'sid': '73739'},
+ {'name': 'Lost Universe', 'sid': '73181'}]

 The "select_series" method must return the appropriate dict, or it can raise
 tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show

diff --git a/lib/certgen.py b/lib/certgen.py
index 0f2b3788..82260b8a 100644
--- a/lib/certgen.py
+++ b/lib/certgen.py
@@ -77,7 +77,7 @@ def generate_key(key_size=4096, output_file='server.key'):
 # Ported from cryptography docs/x509/tutorial.rst
 def generate_local_cert(private_key, days_valid=3650, output_file='server.crt', loc_name=None, org_name=None):

-    def_name = u'SickGear'
+    def_name = 'SickGear'

     # Various details about who we are. For a self-signed certificate the
     # subject and issuer are always the same.
@@ -88,7 +88,7 @@ def generate_local_cert(private_key, days_valid=3650, output_file='server.crt',
     # build Subject Alternate Names (aka SAN) list
     # First the host names, add with x509.DNSName():
-    san_list = [x509.DNSName(u'localhost')]
+    san_list = [x509.DNSName('localhost')]
     try:
         thishostname = text_type(socket.gethostname())
         san_list.append(x509.DNSName(thishostname))
@@ -100,13 +100,13 @@ def generate_local_cert(private_key, days_valid=3650, output_file='server.crt',
     try:
         # noinspection PyCompatibility
         from ipaddress import IPv4Address, IPv6Address
-        san_list.append(x509.IPAddress(IPv4Address(u'127.0.0.1')))
-        san_list.append(x509.IPAddress(IPv6Address(u'::1')))
+        san_list.append(x509.IPAddress(IPv4Address('127.0.0.1')))
+        san_list.append(x509.IPAddress(IPv6Address('::1')))

         # append local v4 ip
         mylocalipv4 = localipv4()
         if mylocalipv4:
-            san_list.append(x509.IPAddress(IPv4Address(u'' + mylocalipv4)))
+            san_list.append(x509.IPAddress(IPv4Address('' + mylocalipv4)))
     except (ImportError, Exception):
         pass

diff --git a/lib/plex/plex.py b/lib/plex/plex.py
index 18996ce7..e024b03e 100644
--- a/lib/plex/plex.py
+++ b/lib/plex/plex.py
@@ -96,7 +96,7 @@ class Plex(object):
         if self.use_logger:
             msg = 'Plex:: ' + msg
             if debug:
-                logger.log(msg, logger.DEBUG)
+                logger.debug(msg)
             else:
                 logger.log(msg)
         # else:

diff --git a/lib/sg_helpers.py b/lib/sg_helpers.py
index 41f99ef9..4dc99d1c 100644
--- a/lib/sg_helpers.py
+++ b/lib/sg_helpers.py
@@ -660,7 +660,7 @@ def clean_data(data):
     if isinstance(data, dict):
         return {k: clean_data(v) for k, v in iteritems(data)}
     if isinstance(data, string_types):
-        return unicodedata.normalize('NFKD', html_unescape(data).strip().replace(u'&amp;', u'&'))
+        return unicodedata.normalize('NFKD', html_unescape(data).strip().replace('&amp;', '&'))
     return data

@@ -938,8 +938,8 @@ def get_url(url,  # type: AnyStr
             else:
                 http_err_text = 'Custom HTTP error code'
             if 'mute_http_error' not in mute:
-                logger.debug(u'Response not ok. %s: %s from requested url %s'
-                             % (response.status_code, http_err_text, url))
+                logger.debug(f'Response not ok. {response.status_code}: {http_err_text} from requested url'
+                             f' {url}')

     except requests.exceptions.HTTPError as e:
         raised = e
@@ -948,29 +948,29 @@ def get_url(url,  # type: AnyStr
                 not (exclude_client_http_codes and is_client_error):
             connection_fail_params = dict(fail_type=ConnectionFailTypes.http, code=e.response.status_code)
         if not raise_status_code:
-            logger.warning(u'HTTP error %s while loading URL%s' % (e.errno, _maybe_request_url(e)))
+            logger.warning(f'HTTP error {e.errno} while loading URL{_maybe_request_url(e)}')
     except requests.exceptions.ConnectionError as e:
         raised = e
         if 'mute_connect_err' not in mute:
-            logger.warning(u'Connection error msg:%s while loading URL%s' % (ex(e), _maybe_request_url(e)))
+            logger.warning(f"Connection error msg:{ex(e)} while loading URL{_maybe_request_url(e)}")
         if failure_monitor:
             connection_fail_params = dict(fail_type=ConnectionFailTypes.connection)
     except requests.exceptions.ReadTimeout as e:
         raised = e
         if 'mute_read_timeout' not in mute:
-            logger.warning(u'Read timed out msg:%s while loading URL%s' % (ex(e), _maybe_request_url(e)))
+            logger.warning(f'Read timed out msg:{ex(e)} while loading URL{_maybe_request_url(e)}')
         if failure_monitor:
             connection_fail_params = dict(fail_type=ConnectionFailTypes.timeout)
     except (requests.exceptions.Timeout, socket.timeout) as e:
         raised = e
         if 'mute_connect_timeout' not in mute:
-            logger.warning(u'Connection timed out msg:%s while loading URL %s' % (ex(e), _maybe_request_url(e, url)))
+            logger.warning(f'Connection timed out msg:{ex(e)} while loading URL {_maybe_request_url(e, url)}')
         if failure_monitor:
             connection_fail_params = dict(fail_type=ConnectionFailTypes.connection_timeout)
     except (BaseException, Exception) as e:
         raised = e
-        logger.warning((u'Exception caught while loading URL {0}\r\nDetail... %s\r\n{1}' % ex(e),
-                        u'Unknown exception while loading URL {0}\r\nDetail... {1}')[not ex(e)]
-                       .format(url, traceback.format_exc()))
+        logger.warning(('Exception caught while loading URL {0}\r\nDetail... %s\r\n{1}' % ex(e),
+                        'Unknown exception while loading URL {0}\r\nDetail... {1}')[not ex(e)]
+                       .format(url, traceback.format_exc()))
         if failure_monitor:
             connection_fail_params = dict(fail_type=ConnectionFailTypes.other)
@@ -1009,8 +1009,8 @@ def get_url(url,  # type: AnyStr
                     result = result, session
         except (TypeError, Exception) as e:
             raised = e
-            logger.warning(u'%s data issue from URL %s\r\nDetail... %s' % (
-                ('Proxy browser', 'JSON')[parse_json], url, ex(e)))
+            logger.warning(f'{("Proxy browser", "JSON")[parse_json]} data issue from URL {url}\r\n'
+                           f'Detail... {ex(e)}')

     elif savename:
         try:
@@ -1135,15 +1135,15 @@ def fix_set_group_id(child_path):
     user_id = os.geteuid()  # only available on UNIX

     if 0 != user_id and user_id != child_path_owner:
-        logger.debug(u'Not running as root or owner of %s, not trying to set the set-group-id' % child_path)
+        logger.debug(f'Not running as root or owner of {child_path}, not trying to set the set-group-id')
         return

     try:
         os.chown(child_path, -1, parent_gid)  # only available on UNIX
-        logger.debug(u'Respecting the set-group-ID bit on the parent directory for %s' % child_path)
+        logger.debug(f'Respecting the set-group-ID bit on the parent directory for {child_path}')
     except OSError:
-        logger.error(u'Failed to respect the set-group-id bit on the parent directory for %s (setting group id %i)'
-                     % (child_path, parent_gid))
+        logger.error(f'Failed to respect the set-group-id bit on the parent directory for {child_path}'
+                     f' (setting group id {parent_gid:d})')


 def remove_file_perm(filepath, log_err=True):
@@ -1203,9 +1203,9 @@ def remove_file(filepath, tree=False, prefix_failure='', log_level=logging.INFO)
                 os.remove(filepath)
         except OSError as e:
             if getattr(e, 'winerror', 0) not in (5, 32):  # 5=access denied (e.g. av), 32=another process has lock
-                logger.log(level=log_level, msg=u'%sUnable to %s %s %s: %s' %
-                           (prefix_failure, ('delete', 'trash')[TRASH_REMOVE_SHOW],
-                            ('file', 'dir')[tree], filepath, ex(e)))
+                logger.log(level=log_level,
+                           msg=f'{prefix_failure}Unable to {("delete", "trash")[TRASH_REMOVE_SHOW]}'
+                               f' {("file", "dir")[tree]} {filepath}: {ex(e)}')
                 break
             time.sleep(t)
             if not os.path.exists(filepath):
@@ -1258,10 +1258,10 @@ def make_path(name, syno=False):
         # Windows, create all missing folders
         if os.name in ('nt', 'ce'):
             try:
-                logger.debug(u'Path %s doesn\'t exist, creating it' % name)
+                logger.debug(f"Path {name} doesn't exist, creating it")
                 os.makedirs(name)
             except (OSError, IOError) as e:
-                logger.error(u'Failed creating %s : %s' % (name, ex(e)))
+                logger.error(f'Failed creating {name} : {ex(e)}')
                 return False

         # not Windows, create all missing folders and set permissions
@@ -1278,7 +1278,7 @@ def make_path(name, syno=False):
                 continue

             try:
-                logger.debug(u'Path %s doesn\'t exist, creating it' % sofar)
+                logger.debug(f"Path {sofar} doesn't exist, creating it")
                 os.mkdir(sofar)
                 # use normpath to remove end separator, otherwise checks permissions against itself
                 chmod_as_parent(os.path.normpath(sofar))
@@ -1286,7 +1286,7 @@ def make_path(name, syno=False):
                 # do the library update for synoindex
                 NOTIFIERS.NotifierFactory().get('SYNOINDEX').addFolder(sofar)
             except (OSError, IOError) as e:
-                logger.error(u'Failed creating %s : %s' % (sofar, ex(e)))
+                logger.error(f'Failed creating {sofar} : {ex(e)}')
                 return False

     return True
@@ -1306,7 +1306,7 @@ def chmod_as_parent(child_path):
     parent_path = os.path.dirname(child_path)

     if not parent_path:
-        logger.debug(u'No parent path provided in %s, unable to get permissions from it' % child_path)
+        logger.debug(f'No parent path provided in {child_path}, unable to get permissions from it')
         return

     parent_path_stat = os.stat(parent_path)
@@ -1327,15 +1327,14 @@ def chmod_as_parent(child_path):
     user_id = os.geteuid()  # only available on UNIX

     if 0 != user_id and user_id != child_path_owner:
-        logger.debug(u'Not running as root or owner of %s, not trying to set permissions' % child_path)
+        logger.debug(f'Not running as root or owner of {child_path}, not trying to set permissions')
         return

     try:
         os.chmod(child_path, child_mode)
-        logger.debug(u'Setting permissions for %s to %o as parent directory has %o'
-                     % (child_path, child_mode, parent_mode))
+        logger.debug(f'Setting permissions for {child_path} to {child_mode:o} as parent directory has {parent_mode:o}')
     except OSError:
-        logger.error(u'Failed to set permission for %s to %o' % (child_path, child_mode))
+        logger.error(f'Failed to set permission for {child_path} to {child_mode:o}')


 def file_bit_filter(mode):

diff --git a/sickgear.py b/sickgear.py
index 4ccb25de..3cc70492 100755
--- a/sickgear.py
+++ b/sickgear.py
@@ -190,7 +190,7 @@ class SickGear(object):
                     rc.load_msg = load_msg
                     rc.run(max_v)
                 else:
-                    print(u'ERROR: Could not download Rollback Module.')
+                    print('ERROR: Could not download Rollback Module.')
             except (BaseException, Exception):
                 pass

@@ -290,13 +290,13 @@ class SickGear(object):
         if self.run_as_daemon:
             pid_dir = os.path.dirname(self.pid_file)
             if not os.access(pid_dir, os.F_OK):
-                sys.exit(u"PID dir: %s doesn't exist. Exiting." % pid_dir)
+                sys.exit(f"PID dir: {pid_dir} doesn't exist. Exiting.")
             if not os.access(pid_dir, os.W_OK):
-                sys.exit(u'PID dir: %s must be writable (write permissions). Exiting.' % pid_dir)
+                sys.exit(f'PID dir: {pid_dir} must be writable (write permissions). Exiting.')

         else:
             if self.console_logging:
-                print(u'Not running in daemon mode. PID file creation disabled')
+                print('Not running in daemon mode. PID file creation disabled')

             self.create_pid = False

@@ -309,27 +309,27 @@ class SickGear(object):
             try:
                 os.makedirs(sickgear.DATA_DIR, 0o744)
             except os.error:
-                sys.exit(u'Unable to create data directory: %s Exiting.' % sickgear.DATA_DIR)
+                sys.exit(f'Unable to create data directory: {sickgear.DATA_DIR} Exiting.')

         # Make sure we can write to the data dir
         if not os.access(sickgear.DATA_DIR, os.W_OK):
-            sys.exit(u'Data directory: %s must be writable (write permissions). Exiting.' % sickgear.DATA_DIR)
+            sys.exit(f'Data directory: {sickgear.DATA_DIR} must be writable (write permissions). Exiting.')

         # Make sure we can write to the config file
         if not os.access(sickgear.CONFIG_FILE, os.W_OK):
             if os.path.isfile(sickgear.CONFIG_FILE):
-                sys.exit(u'Config file: %s must be writeable (write permissions). Exiting.' % sickgear.CONFIG_FILE)
+                sys.exit(f'Config file: {sickgear.CONFIG_FILE} must be writeable (write permissions). Exiting.')
             elif not os.access(os.path.dirname(sickgear.CONFIG_FILE), os.W_OK):
-                sys.exit(u'Config file directory: %s must be writeable (write permissions). Exiting'
-                         % os.path.dirname(sickgear.CONFIG_FILE))
+                sys.exit(f'Config file directory: {os.path.dirname(sickgear.CONFIG_FILE)}'
+                         f' must be writeable (write permissions). Exiting')

         os.chdir(sickgear.DATA_DIR)

         if self.console_logging:
-            print(u'Starting up SickGear from %s' % sickgear.CONFIG_FILE)
+            print(f'Starting up SickGear from {sickgear.CONFIG_FILE}')

         # Load the config and publish it to the sickgear package
         if not os.path.isfile(sickgear.CONFIG_FILE):
-            print(u'Unable to find "%s", all settings will be default!' % sickgear.CONFIG_FILE)
+            print(f'Unable to find "{sickgear.CONFIG_FILE}", all settings will be default!')

         sickgear.CFG = ConfigObj(sickgear.CONFIG_FILE)

         try:
@@ -353,7 +353,7 @@ class SickGear(object):
         sickgear.initialize(console_logging=self.console_logging)

         if self.forced_port:
-            logger.log(u'Forcing web server to port %s' % self.forced_port)
+            logger.log(f'Forcing web server to port {self.forced_port}')
             self.start_port = self.forced_port
         else:
             self.start_port = sickgear.WEB_PORT
@@ -403,12 +403,11 @@ class SickGear(object):
             self.webserver.wait_server_start()
             sickgear.started = True
         except (BaseException, Exception):
-            logger.log(u'Unable to start web server, is something else running on port %d?' % self.start_port,
-                       logger.ERROR)
+            logger.error(f'Unable to start web server, is something else running on port {self.start_port:d}?')
             if self.run_as_systemd:
                 self.exit(0)
             if sickgear.LAUNCH_BROWSER and not self.no_launch:
-                logger.log(u'Launching browser and exiting', logger.ERROR)
+                logger.error('Launching browser and exiting')
                 sickgear.launch_browser(self.start_port)
             self.exit(1)
@@ -439,11 +438,11 @@ class SickGear(object):
                     self.execute_rollback(mo, max_v, load_msg)
                     cur_db_version = db.DBConnection(d).check_db_version()
                     if 100000 <= cur_db_version:
-                        print(u'Rollback to production failed.')
-                        sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
+                        print('Rollback to production failed.')
+                        sys.exit('If you have used other forks, your database may be unusable due to their changes')
                     if 100000 <= max_v and None is not base_v:
                         max_v = base_v  # set max_v to the needed base production db for test_db
-                    print(u'Rollback to production of [%s] successful.' % d)
+                    print(f'Rollback to production of [{d}] successful.')
                     sickgear.classes.loading_msg.set_msg_progress(load_msg, 'Finished')

                 # handling of production version higher than current base of test db
@@ -454,30 +453,29 @@ class SickGear(object):
                     self.execute_rollback(mo, base_v, load_msg)
                     cur_db_version = db.DBConnection(d).check_db_version()
                     if 100000 <= cur_db_version:
-                        print(u'Rollback to production base failed.')
-                        sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
+                        print('Rollback to production base failed.')
+                        sys.exit('If you have used other forks, your database may be unusable due to their changes')
                     if 100000 <= max_v and None is not base_v:
                         max_v = base_v  # set max_v to the needed base production db for test_db
-                    print(u'Rollback to production base of [%s] successful.' % d)
+                    print(f'Rollback to production base of [{d}] successful.')
                     sickgear.classes.loading_msg.set_msg_progress(load_msg, 'Finished')

             # handling of production db versions
             if 0 < cur_db_version < 100000:
                 if cur_db_version < min_v:
-                    print(u'Your [%s] database version (%s) is too old to migrate from with this version of SickGear'
-                          % (d, cur_db_version))
-                    sys.exit(u'Upgrade using a previous version of SG first,'
-                             + u' or start with no database file to begin fresh')
+                    print(f'Your [{d}] database version ({cur_db_version})'
+                          f' is too old to migrate from with this version of SickGear')
+                    sys.exit('Upgrade using a previous version of SG first,'
+                             ' or start with no database file to begin fresh')
                 if cur_db_version > max_v:
                     sickgear.classes.loading_msg.set_msg_progress(load_msg, 'Rollback')
-                    print(u'Your [%s] database version (%s) has been incremented past'
-                          u' what this version of SickGear supports. Trying to rollback now. Please wait...' %
-                          (d, cur_db_version))
+                    print(f'Your [{d}] database version ({cur_db_version}) has been incremented past what this'
+                          f' version of SickGear supports. Trying to rollback now. Please wait...')
                     self.execute_rollback(mo, max_v, load_msg)
                     if db.DBConnection(d).check_db_version() > max_v:
-                        print(u'Rollback failed.')
-                        sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
-                    print(u'Rollback of [%s] successful.' % d)
+                        print('Rollback failed.')
+                        sys.exit('If you have used other forks, your database may be unusable due to their changes')
+                    print(f'Rollback of [{d}] successful.')
                     sickgear.classes.loading_msg.set_msg_progress(load_msg, 'Finished')

         # migrate the config if it needs it
@@ -501,9 +499,9 @@ class SickGear(object):
         if os.path.exists(restore_dir):
             sickgear.classes.loading_msg.message = 'Restoring files'
             if self.restore(restore_dir, sickgear.DATA_DIR):
-                logger.log(u'Restore successful...')
+                logger.log('Restore successful...')
             else:
-                logger.log_error_and_exit(u'Restore FAILED!')
+                logger.log_error_and_exit('Restore FAILED!')

         # refresh network timezones
         sickgear.classes.loading_msg.message = 'Checking network timezones'
@@ -669,7 +667,7 @@ class SickGear(object):
         # Write pid
         if self.create_pid:
             pid = str(os.getpid())
-            logger.log(u'Writing PID: %s to %s' % (pid, self.pid_file))
+            logger.log(f'Writing PID: {pid} to {self.pid_file}')
             try:
                 os.fdopen(os.open(self.pid_file, os.O_CREAT | os.O_WRONLY, 0o644), 'w').write('%s\n' % pid)
             except (BaseException, Exception) as er:
@@ -705,7 +703,7 @@ class SickGear(object):
         Populates the showList with shows from the database
         """
-        logger.log(u'Loading initial show list')
+        logger.log('Loading initial show list')

         my_db = db.DBConnection(row_type='dict')
         sql_result = my_db.select(
@@ -749,8 +747,7 @@ class SickGear(object):
                     sickgear.showDict[show_obj.sid_int] = show_obj
                     _ = show_obj.ids
                 except (BaseException, Exception) as err:
-                    logger.log('There was an error creating the show in %s: %s' % (
-                        cur_result['location'], ex(err)), logger.ERROR)
+                    logger.error('There was an error creating the show in %s: %s' % (cur_result['location'], ex(err)))
         sickgear.webserve.Home.make_showlist_unique_names()

     @staticmethod
@@ -801,13 +798,13 @@ class SickGear(object):
             popen_list += sickgear.MY_ARGS

         if self.run_as_systemd:
-            logger.log(u'Restarting SickGear with exit(1) handler and %s' % popen_list)
+            logger.log(f'Restarting SickGear with exit(1) handler and {popen_list}')
             logger.close()
             self.exit(1)

         if '--nolaunch' not in popen_list:
             popen_list += ['--nolaunch']
-        logger.log(u'Restarting SickGear with %s' % popen_list)
+        logger.log(f'Restarting SickGear with {popen_list}')
         logger.close()
         from _23 import Popen
         with Popen(popen_list, cwd=os.getcwd()):

diff --git a/sickgear/__init__.py b/sickgear/__init__.py
index 53d7e3b7..305952a2 100644
--- a/sickgear/__init__.py
+++ b/sickgear/__init__.py
@@ -803,7 +803,7 @@ def init_stage_1(console_logging):
             CACHE_DIR = ACTUAL_CACHE_DIR

     if not helpers.make_dir(CACHE_DIR):
-        logger.log(u'!!! Creating local cache dir failed, using system default', logger.ERROR)
+        logger.error('!!! creating local cache dir failed, using system default')
         CACHE_DIR = None

     # clean cache folders
@@ -811,7 +811,7 @@ def init_stage_1(console_logging):
         helpers.clear_cache()
         ZONEINFO_DIR = os.path.join(CACHE_DIR, 'zoneinfo')
         if not os.path.isdir(ZONEINFO_DIR) and not helpers.make_path(ZONEINFO_DIR):
-            logger.log(u'!!! Creating local zoneinfo dir failed', logger.ERROR)
+            logger.error('!!! creating local zoneinfo dir failed')
     sg_helpers.CACHE_DIR = CACHE_DIR
     sg_helpers.DATA_DIR = DATA_DIR
@@ -830,7 +830,7 @@ def init_stage_1(console_logging):
     TRIM_ZERO = bool(check_setting_int(CFG, 'GUI', 'trim_zero', 0))
     DATE_PRESET = check_setting_str(CFG, 'GUI', 'date_preset', '%x')
     TIME_PRESET_W_SECONDS = check_setting_str(CFG, 'GUI', 'time_preset', '%I:%M:%S %p')
-    TIME_PRESET = TIME_PRESET_W_SECONDS.replace(u':%S', u'')
+    TIME_PRESET = TIME_PRESET_W_SECONDS.replace(':%S', '')
     TIMEZONE_DISPLAY = check_setting_str(CFG, 'GUI', 'timezone_display', 'network')
     SHOW_TAGS = check_setting_str(CFG, 'GUI', 'show_tags', 'Show List').split(',')
     SHOW_TAG_DEFAULT = check_setting_str(CFG, 'GUI', 'show_tag_default',
@@ -842,7 +842,7 @@ def init_stage_1(console_logging):
     LOG_DIR = os.path.normpath(os.path.join(DATA_DIR, ACTUAL_LOG_DIR))

     if not helpers.make_dir(LOG_DIR):
-        logger.log(u'!!! No log folder, logging to screen only!', logger.ERROR)
+        logger.error('!!! no log folder, logging to screen only!')

     FILE_LOGGING_PRESET = check_setting_str(CFG, 'General', 'file_logging_preset', 'DEBUG')
     if bool(check_setting_int(CFG, 'General', 'file_logging_db', 0)):
@@ -1488,7 +1488,7 @@ def init_stage_1(console_logging):
             ('docker/other', 'snap')['snap' in CUR_COMMIT_HASH]

     if not os.path.isfile(CONFIG_FILE):
-        logger.log(u'Unable to find \'%s\', all settings will be default!' % CONFIG_FILE, logger.DEBUG)
+        logger.debug(f'Unable to find \'{CONFIG_FILE}\', all settings will be default!')
         update_config = True

     # Get expected config version
@@ -1747,20 +1747,20 @@ def restart(soft=True, update_pkg=None):
         if update_pkg:
             MY_ARGS.append('--update-pkg')

-        logger.log(u'Trigger event restart')
+        logger.log('Trigger event restart')
         events.put(events.SystemEvent.RESTART)

     else:
         halt()
         save_all()
-        logger.log(u'Re-initializing all data')
+        logger.log('Re-initializing all data')
         initialize()


 def sig_handler(signum=None, _=None):
     is_ctrlbreak = 'win32' == sys.platform and signal.SIGBREAK == signum
-    msg = u'Signal "%s" found' % (signal.SIGINT == signum and 'CTRL-C' or is_ctrlbreak and 'CTRL+BREAK' or
-                                  signal.SIGTERM == signum and 'Termination' or signum)
+    msg = 'Signal "%s" found' % (signal.SIGINT == signum and 'CTRL-C' or is_ctrlbreak and 'CTRL+BREAK' or
+                                 signal.SIGTERM == signum and 'Termination' or signum)
     if None is signum or signum in (signal.SIGINT, signal.SIGTERM) or is_ctrlbreak:
         logger.log('%s, saving and exiting...' % msg)
         events.put(events.SystemEvent.SHUTDOWN)
@@ -1831,12 +1831,12 @@ def save_all():
     global showList

     # write all shows
-    logger.log(u'Saving all shows to the database')
+    logger.log('Saving all shows to the database')
     for show_obj in showList:  # type: tv.TVShow
         show_obj.save_to_db()

     # save config
-    logger.log(u'Saving config file to disk')
+    logger.log('Saving config file to disk')
     save_config()


@@ -2400,4 +2400,4 @@ def launch_browser(start_port=None):
     try:
         webbrowser.open(browser_url, 1, True)
     except (BaseException, Exception):
-        logger.log('Unable to launch a browser', logger.ERROR)
+        logger.error('Unable to launch a browser')

diff --git a/sickgear/anime.py b/sickgear/anime.py
index 47eeb48d..b5f0938f 100644
--- a/sickgear/anime.py
+++ b/sickgear/anime.py
@@ -52,7 +52,7 @@ class AniGroupList(object):
         self.load()

     def load(self):
-        logger.log(u'Building allow amd block list for %s' % self.tvid_prodid, logger.DEBUG)
+        logger.debug(f'Building allow and block list for {self.tvid_prodid}')
         self.allowlist = self._load_list('allowlist')
         self.blocklist = self._load_list('blocklist')

@@ -74,8 +74,7 @@ class AniGroupList(object):
         for cur_result in sql_result:
             groups.append(cur_result['keyword'])

-        logger.log('AniPermsList: %s loaded keywords from %s: %s' % (self.tvid_prodid, table, groups),
-                   logger.DEBUG)
+        logger.debug('AniPermsList: %s loaded keywords from %s: %s' % (self.tvid_prodid, table, groups))

         return groups

@@ -88,7 +87,7 @@ class AniGroupList(object):
         self._del_all_keywords('allowlist')
         self._add_keywords('allowlist', values)
         self.allowlist = values
-        logger.log('Allowlist set to: %s' % self.allowlist, logger.DEBUG)
+        logger.debug('Allowlist set to: %s' % self.allowlist)

     def set_block_keywords(self, values):
         # type: (List[AnyStr]) -> None
@@ -99,7 +98,7 @@ class AniGroupList(object):
         self._del_all_keywords('blocklist')
         self._add_keywords('blocklist', values)
         self.blocklist = values
-        logger.log('Blocklist set to: %s' % self.blocklist, logger.DEBUG)
+        logger.debug('Blocklist set to: %s' % self.blocklist)

     def _del_all_keywords(self, table):
         # type: (AnyStr) -> None
@@ -133,15 +132,14 @@ class AniGroupList(object):
         :return: True or False
         """
         if not result.release_group:
-            logger.log('Failed to detect release group, invalid result', logger.DEBUG)
+            logger.debug('Failed to detect release group, invalid result')
             return False

         allowed = result.release_group.lower() in [x.lower() for x in self.allowlist] or not self.allowlist
         blocked = result.release_group.lower() in [x.lower() for x in self.blocklist]

-        logger.log('Result %sallowed%s in block list. Parsed group name: "%s" from result "%s"' %
-                   (('not ', '')[allowed], (', but', ' and not')[not blocked], result.release_group, result.name),
-                   logger.DEBUG)
+        logger.debug(f'Result {("not ", "")[allowed]}allowed{(", but", " and not")[not blocked]} in block list.'
+                     f' Parsed group name: "{result.release_group}" from result "{result.name}"')

         return allowed and not blocked

@@ -193,29 +191,29 @@ def create_anidb_obj(**kwargs):

 def set_up_anidb_connection():
     if not sickgear.USE_ANIDB:
-        logger.log(u'Usage of anidb disabled. Skipping', logger.DEBUG)
+        logger.debug('Usage of anidb disabled. Skipping')
         return False

     if not sickgear.ANIDB_USERNAME and not sickgear.ANIDB_PASSWORD:
-        logger.log(u'anidb username and/or password are not set. Aborting anidb lookup.', logger.DEBUG)
+        logger.debug('anidb username and/or password are not set. Aborting anidb lookup.')
         return False

     if not sickgear.ADBA_CONNECTION:
-        # anidb_logger = (lambda x: logger.log('ANIDB: ' + str(x)), logger.DEBUG)
+        # anidb_logger = (lambda x: logger.debug('ANIDB: ' + str(x)))
         sickgear.ADBA_CONNECTION = adba.Connection(keepAlive=True)  # , log=anidb_logger)

     auth = False
     try:
         auth = sickgear.ADBA_CONNECTION.authed()
     except (BaseException, Exception) as e:
-        logger.log(u'exception msg: ' + ex(e))
+        logger.log(f'exception msg: {ex(e)}')
         pass

     if not auth:
         try:
             sickgear.ADBA_CONNECTION.auth(sickgear.ANIDB_USERNAME, sickgear.ANIDB_PASSWORD)
         except (BaseException, Exception) as e:
-            logger.log(u'exception msg: ' + ex(e))
+            logger.log(f'exception msg: {ex(e)}')
             return False
     else:
         return True
@@ -230,7 +228,7 @@ def pull_anidb_groups(show_name):
         anime = create_anidb_obj(name=show_name)
         return anime.get_groups()
     except (BaseException, Exception) as e:
-        logger.log(u'Anidb exception: %s' % ex(e), logger.DEBUG)
+        logger.debug(f'Anidb exception: {ex(e)}')
         return False

@@ -258,7 +256,7 @@ def push_anidb_mylist(filepath, anidb_episode):
             log = ('Adding the file to the anidb mylist', logger.DEBUG)
             result = True
         except (BaseException, Exception) as e:
-            log = (u'exception msg: %s' % ex(e), logger.MESSAGE)
+            log = (f'exception msg: {ex(e)}', logger.MESSAGE)
             result = False

     return result, log

diff --git a/sickgear/auto_post_processer.py b/sickgear/auto_post_processer.py
index dfa97031..124e8b4a 100644
--- a/sickgear/auto_post_processer.py
+++ b/sickgear/auto_post_processer.py
@@ -38,13 +38,12 @@ class PostProcesser(object):
     def _main():
         if not os.path.isdir(sickgear.TV_DOWNLOAD_DIR):
-            logger.log(u"Automatic post-processing attempted but dir %s doesn't exist" % sickgear.TV_DOWNLOAD_DIR,
-                       logger.ERROR)
+            logger.error('Automatic post-processing attempted but dir %s doesn\'t exist' % sickgear.TV_DOWNLOAD_DIR)
             return

         if not os.path.isabs(sickgear.TV_DOWNLOAD_DIR):
-            logger.log(u'Automatic post-processing attempted but dir %s is relative '
-                       '(and probably not what you really want to process)' % sickgear.TV_DOWNLOAD_DIR, logger.ERROR)
+            logger.error('Automatic post-processing attempted but dir %s is relative '
+                         '(and probably not what you really want to process)' % sickgear.TV_DOWNLOAD_DIR)
             return

         processTV.processDir(sickgear.TV_DOWNLOAD_DIR, is_basedir=True)

diff --git a/sickgear/browser.py b/sickgear/browser.py
index aa0e8589..6a902537 100644
--- a/sickgear/browser.py
+++ b/sickgear/browser.py
@@ -78,7 +78,7 @@ def folders_at_path(path, include_parent=False, include_files=False):
     try:
         file_list = get_file_list(path, include_files)
     except OSError as e:
-        logger.log('Unable to open %s: %r / %s' % (path, e, ex(e)), logger.WARNING)
+        logger.warning('Unable to open %s: %r / %s' % (path, e, ex(e)))
         file_list = get_file_list(parent_path, include_files)

     file_list = sorted(file_list, key=lambda x: os.path.basename(x['name']).lower())

diff --git a/sickgear/clients/deluge.py b/sickgear/clients/deluge.py
index 411315a8..b137f66d 100644
--- a/sickgear/clients/deluge.py
+++ b/sickgear/clients/deluge.py
@@ -52,7 +52,7 @@ class DelugeAPI(GenericClient):
                 if not connected:
                     hosts = self._post_json({'method': 'web.get_hosts', 'params': [], 'id': 11})
                     if 0 == len(hosts):
-                        logger.log('%s: WebUI does not contain daemons' % self.name, logger.ERROR)
+                        logger.error('%s: WebUI does not contain daemons' % self.name)
                         return None

                     self._post_json({'method': 'web.connect', 'params': [hosts[0][0]], 'id': 11}, False)
@@ -60,7 +60,7 @@ class DelugeAPI(GenericClient):
                     connected = self._post_json({'method': 'web.connected', 'params': [], 'id': 10})
                     if not connected:
-                        logger.log('%s: WebUI could not connect to daemon' % self.name, logger.ERROR)
+                        logger.error('%s: WebUI could not connect to daemon' % self.name)
                         return None
         except RequestException:
             return None
@@ -94,7 +94,7 @@ class DelugeAPI(GenericClient):
         label = sickgear.TORRENT_LABEL
         if ' ' in label:
-            logger.log('%s: Invalid label. Label must not contain a space' % self.name, logger.ERROR)
+            logger.error('%s: Invalid label. Label must not contain a space' % self.name)
             return False

         if label:
@@ -106,22 +106,21 @@ class DelugeAPI(GenericClient):

                 if None is not labels:
                     if label not in labels:
-                        logger.log('%s: %s label does not exist in Deluge we must add it' % (self.name, label),
-                                   logger.DEBUG)
+                        logger.debug('%s: %s label does not exist in Deluge we must add it' % (self.name, label))
                         self._request_json({
                             'method': 'label.add',
                             'params': [label],
                             'id': 4})
-                        logger.log('%s: %s label added to Deluge' % (self.name, label), logger.DEBUG)
+                        logger.debug('%s: %s label added to Deluge' % (self.name, label))

                     # add label to torrent
                     self._request_json({
                         'method': 'label.set_torrent',
                         'params': [result.hash, label],
                         'id': 5})
-                    logger.log('%s: %s label added to torrent' % (self.name, label), logger.DEBUG)
+                    logger.debug('%s: %s label added to torrent' % (self.name, label))

                 else:
-                    logger.log('%s: label plugin not detected' % self.name, logger.DEBUG)
+                    logger.debug('%s: label plugin not detected' % self.name)
                     return False

         return True

diff --git a/sickgear/clients/download_station.py b/sickgear/clients/download_station.py
index 03f95f15..2752ab48 100644
--- a/sickgear/clients/download_station.py
+++ b/sickgear/clients/download_station.py
@@ -71,7 +71,7 @@ class DownloadStationAPI(GenericClient):
         # type: (AnyStr) -> None
         out = '%s%s: %s' % (self.name, (' replied with', '')['Could not' in msg], msg)
         self._errmsg = '\n%s.' % out
-        logger.log(out, logger.ERROR)
+        logger.error(out)

     def _error_task(self, response):

@@ -234,7 +234,7 @@ class DownloadStationAPI(GenericClient):
             i = 0
             while retry_ids:
                 for i in tries:
-                    logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG)
+                    logger.debug('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i))
                     time.sleep(i)
                     item['fail'] = []
                     for task in filter(filter_func, self._tinf(retry_ids, err=True)):
@@ -246,8 +246,8 @@ class DownloadStationAPI(GenericClient):
                         retry_ids = item['fail']
                     else:
                         if max(tries) == i:
-                            logger.log('%s: failed to %s %s item(s) after %s tries over %s mins, aborted' %
-                                       (self.name, act, len(item['fail']), len(tries), sum(tries) / 60), logger.DEBUG)
+                            logger.debug('%s: failed to %s %s item(s) after %s tries over %s mins, aborted' %
+                                         (self.name, act, len(item['fail']), len(tries), sum(tries) / 60))

         return (item['fail'] + item['ignore']) or True

@@ -261,8 +261,8 @@ class DownloadStationAPI(GenericClient):
         if 3 <= self._task_version:
             return self._add_torrent(uri={'uri': search_result.url})

-        logger.log('%s: the API at %s doesn\'t support torrent magnet, download skipped' %
-                   (self.name, self.host), logger.WARNING)
+        logger.warning('%s: the API at %s doesn\'t support torrent magnet, download skipped' %
+                       (self.name, self.host))

     def _add_torrent_file(self, search_result):
         # type: (TorrentSearchResult) -> Union[AnyStr, bool]

diff --git a/sickgear/clients/generic.py b/sickgear/clients/generic.py
index 143903e9..4ecc5084 100644
--- a/sickgear/clients/generic.py
+++ b/sickgear/clients/generic.py
@@ -51,7 +51,7 @@ class GenericClient(object):
                 seg = seg[0:c - (len(sample) - 2)] + sample
             output += ['%s: request %s= %s%s%s' % (self.name, arg, ('', '..')[bool(i)], seg, ('', '..')[i != nch])]

-        logger.log(output, logger.DEBUG)
+        logger.debug(output)

     def _request(self, method='get', params=None, data=None, files=None, **kwargs):

@@ -61,7 +61,7 @@ class GenericClient(object):
         self.last_time = time.time()

         if not self._get_auth():
-            logger.log('%s: Authentication failed' % self.name, logger.ERROR)
+            logger.error('%s: Authentication failed' % self.name)
             return False

         # self._log_request_details(method, params, data, files, **kwargs)
@@ -70,31 +70,30 @@ class GenericClient(object):
             response = self.session.__getattribute__(method)(self.url, params=params, data=data, files=files,
                                                              timeout=kwargs.pop('timeout', 120), verify=False, **kwargs)
         except requests.exceptions.ConnectionError as e:
-            logger.log('%s: Unable to connect %s' % (self.name, ex(e)), logger.ERROR)
+            logger.error('%s: Unable to connect %s' % (self.name, ex(e)))
             return False
         except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
-            logger.log('%s: Invalid host' % self.name, logger.ERROR)
+            logger.error('%s: Invalid host' % self.name)
             return False
         except requests.exceptions.HTTPError as e:
-            logger.log('%s: Invalid HTTP request %s' % (self.name, ex(e)), logger.ERROR)
+            logger.error('%s: Invalid HTTP request %s' % (self.name, ex(e)))
             return False
         except requests.exceptions.Timeout as e:
-            logger.log('%s: Connection timeout %s' % (self.name, ex(e)), logger.ERROR)
+            logger.error('%s: Connection timeout %s' % (self.name, ex(e)))
             return False
         except (BaseException, Exception) as e:
-            logger.log('%s: Unknown exception raised when sending torrent to %s: %s' % (self.name, self.name, ex(e)),
-                       logger.ERROR)
+            logger.error('%s: Unknown exception raised when sending torrent to %s: %s' % (self.name, self.name, ex(e)))
             return False

         if 401 == response.status_code:
-            logger.log('%s: Invalid username or password, check your config' % self.name, logger.ERROR)
+            logger.error('%s: Invalid username or password, check your config' % self.name)
             return False

         if response.status_code in http_error_code:
-            logger.log('%s: %s' % (self.name, http_error_code[response.status_code]), logger.DEBUG)
+            logger.debug('%s: %s' % (self.name, http_error_code[response.status_code]))
             return False

-        logger.log('%s: Response to %s request is %s' % (self.name, method.upper(), response.text), logger.DEBUG)
+        logger.debug('%s: Response to %s request is %s' % (self.name, method.upper(), response.text))

         return response
@@ -213,10 +212,10 @@ class GenericClient(object):

         r_code = False

-        logger.log('Calling %s client' % self.name, logger.DEBUG)
+        logger.debug('Calling %s client' % self.name)

         if not self._get_auth():
-            logger.log('%s: Authentication failed' % self.name, logger.ERROR)
+            logger.error('%s: Authentication failed' % self.name)
             return r_code

         try:
@@ -225,8 +224,8 @@ class GenericClient(object):
                 result = self._get_torrent_hash(result)

         except (BaseException, Exception) as e:
-            logger.log('Bad torrent data: hash is %s for [%s]' % (result.hash, result.name), logger.ERROR)
-            logger.log('Exception raised when checking torrent data: %s' % (ex(e)), logger.DEBUG)
+            logger.error('Bad torrent data: hash is %s for [%s]' % (result.hash, result.name))
+            logger.debug('Exception raised when checking torrent data: %s' % (ex(e)))
             return r_code

         try:
@@ -237,30 +236,30 @@ class GenericClient(object):
             self.created_id = isinstance(r_code, string_types) and r_code or None

             if not r_code:
-                logger.log('%s: Unable to send torrent to client' % self.name, logger.ERROR)
+                logger.error('%s: Unable to send torrent to client' % self.name)
                 return False

             if not self._set_torrent_pause(result):
-                logger.log('%s: Unable to set the pause for torrent' % self.name, logger.ERROR)
+                logger.error('%s: Unable to set the pause for torrent' % self.name)

             if not self._set_torrent_label(result):
-                logger.log('%s: Unable to set the label for torrent' % self.name, logger.ERROR)
+                logger.error('%s: Unable to set the label for torrent' % self.name)

             if not self._set_torrent_ratio(result):
-                logger.log('%s: Unable to set the ratio for torrent' % self.name, logger.ERROR)
+                logger.error('%s: Unable to set the ratio for torrent' % self.name)

             if not self._set_torrent_seed_time(result):
-                logger.log('%s: Unable to set the seed time for torrent' % self.name, logger.ERROR)
+                logger.error('%s: Unable to set the seed time for torrent' % self.name)

             if not self._set_torrent_path(result):
-                logger.log('%s: Unable to set the path for torrent' % self.name, logger.ERROR)
+                logger.error('%s: Unable to set the path for torrent' % self.name)

             if 0 != result.priority and not self._set_torrent_priority(result):
-                logger.log('%s: Unable to set priority for torrent' % self.name, logger.ERROR)
+                logger.error('%s: Unable to set priority for torrent' % self.name)

         except (BaseException, Exception) as e:
-            logger.log('%s: Failed sending torrent: %s - %s' % (self.name, result.name, result.hash), logger.ERROR)
-            logger.log('%s: Exception raised when sending torrent: %s' % (self.name, ex(e)), logger.DEBUG)
+            logger.error('%s: Failed sending torrent: %s - %s' % (self.name, result.name, result.hash))
+            logger.debug('%s: Exception raised when sending torrent: %s' % (self.name, ex(e)))
             return r_code

diff --git a/sickgear/clients/qbittorrent.py b/sickgear/clients/qbittorrent.py
index b9711e89..6f571128 100644
--- a/sickgear/clients/qbittorrent.py
+++ b/sickgear/clients/qbittorrent.py
@@ -168,7 +168,7 @@ class QbittorrentAPI(GenericClient):
                 task = self._tinf(t.get('hash'), use_props=False, err=True)[0]
                 return 1 < task.get('priority') or self._ignore_state(task)  # then mark fail
             elif isinstance(response, string_types) and 'queueing' in response.lower():
-                logger.log('%s: %s' % (self.name, response), logger.ERROR)
+                logger.error('%s: %s' % (self.name, response))
                 return not mark_fail

         return mark_fail
@@ -195,7 +195,7 @@ class QbittorrentAPI(GenericClient):
                 task = self._tinf(t.get('hash'), use_props=False, err=True)[0]
                 return label not in task.get('category') or self._ignore_state(task)  # then mark fail
             elif isinstance(response, string_types) and 'incorrect' in response.lower():
-                logger.log('%s: %s. "%s" isn\'t known to qB' % (self.name, response, label), logger.ERROR)
+                logger.error('%s: %s. "%s" isn\'t known to qB' % (self.name, response, label))
                 return not mark_fail

         return mark_fail
@@ -312,7 +312,7 @@ class QbittorrentAPI(GenericClient):
             i = 0
             while retry_ids:
                 for i in tries:
-                    logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG)
+                    logger.debug('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i))
                     time.sleep(i)
                     item['fail'] = []
                     for task in filter(filter_func, self._tinf(retry_ids, use_props=False, err=True)):
@@ -324,8 +324,8 @@ class QbittorrentAPI(GenericClient):
                         retry_ids = item['fail']
                     else:
                         if max(tries) == i:
-                            logger.log('%s: failed to %s %s item(s) after %s tries over %s mins, aborted' %
-                                       (self.name, act, len(item['fail']), len(tries), sum(tries) / 60), logger.DEBUG)
+                            logger.debug('%s: failed to %s %s item(s) after %s tries over %s mins, aborted' %
+                                         (self.name, act, len(item['fail']), len(tries), sum(tries) / 60))

         return (item['fail'] + item['ignore']) or True

@@ -356,7 +356,7 @@ class QbittorrentAPI(GenericClient):
         :return: True if created, else Falsy if nothing created
         """
         if self._tinf(data.hash):
-            logger.log('Could not create task, the hash is already in use', logger.ERROR)
+            logger.error('Could not create task, the hash is already in use')
             return

         label = sickgear.TORRENT_LABEL.replace(' ', '_')
@@ -401,7 +401,7 @@ class QbittorrentAPI(GenericClient):
         authless = bool(re.search('(?i)login|version', cmd))
         if authless or self.auth:
             if not authless and not self._get_auth():
-                logger.log('%s: Authentication failed' % self.name, logger.ERROR)
+                logger.error('%s: Authentication failed' % self.name)
                 return

             # self._log_request_details('%s%s' % (self.api_ns, cmd.strip('/')), **kwargs)
@@ -431,7 +431,7 @@ class QbittorrentAPI(GenericClient):
             self.api_ns = 'api/v2/'
             response = self._client_request('auth/login', post_data=post_data, raise_status_code=True)
             if isinstance(response, string_types) and 'banned' in response.lower():
-                logger.log('%s: %s' % (self.name, response), logger.ERROR)
+                logger.error('%s: %s' % (self.name, response))
                 response = False
             elif not response:
                 self.api_ns = ''

diff --git a/sickgear/clients/rtorrent.py b/sickgear/clients/rtorrent.py
index 332428cd..4643fa46 100644
--- a/sickgear/clients/rtorrent.py
+++ b/sickgear/clients/rtorrent.py
@@ -43,7 +43,7 @@ class RtorrentAPI(GenericClient):
         if self.auth:
             try:
                 if self.auth.has_local_id(data.hash):
-                    logger.log('%s: Item already exists %s' % (self.name, data.name), logger.WARNING)
+                    logger.warning('%s: Item already exists %s' % (self.name, data.name))
                     raise

                 custom_var = (1, sickgear.TORRENT_LABEL_VAR or '')[0 <= sickgear.TORRENT_LABEL_VAR <= 5]
@@ -62,8 +62,8 @@ class RtorrentAPI(GenericClient):
                     if torrent and sickgear.TORRENT_LABEL:
                         label = torrent.get_custom(custom_var)
                         if sickgear.TORRENT_LABEL != label:
-                            logger.log('%s: could not change custom%s label value \'%s\' to \'%s\' for %s' % (
-                                self.name, custom_var, label, sickgear.TORRENT_LABEL, torrent.name), logger.WARNING)
+                            logger.warning('%s: could not change custom%s label value \'%s\' to \'%s\' for %s' % (
+                                self.name, custom_var, label, sickgear.TORRENT_LABEL, torrent.name))

             except (BaseException, Exception):
                 pass

diff --git a/sickgear/clients/transmission.py b/sickgear/clients/transmission.py
index 8fb5810a..02a9b8da 100644
--- a/sickgear/clients/transmission.py
+++ b/sickgear/clients/transmission.py
@@ -86,7 +86,7 @@ class TransmissionAPI(GenericClient):
         # populate blanked and download_dir
         if not self._get_auth():
-            logger.log('%s: Authentication failed' % self.name, logger.ERROR)
+            logger.error('%s: Authentication failed' % self.name)
             return False

         download_dir = None
@@ -95,7 +95,7 @@ class TransmissionAPI(GenericClient):
         elif self.download_dir:
             download_dir = self.download_dir
         else:
-            logger.log('Path required for Transmission Downloaded files location', logger.ERROR)
+            logger.error('Path required for Transmission Downloaded files location')

         if not download_dir and not self.blankable:
             return False

diff --git a/sickgear/common.py b/sickgear/common.py
index e66b946a..ae5ecaa2 100644
--- a/sickgear/common.py
+++ b/sickgear/common.py
@@ -300,7 +300,7 @@ class Quality(object):
             if not hd_options and full_hd:
                 return Quality.FULLHDBLURAY
             if sickgear.ANIME_TREAT_AS_HDTV:
-                logger.log(u'Treating file: %s with "unknown" quality as HDTV per user settings' % name, logger.DEBUG)
+                logger.debug(f'Treating file: {name} with "unknown" quality as HDTV per user settings')
                 return Quality.HDTV
             return Quality.UNKNOWN
@@ -371,10 +371,10 @@ class Quality(object):
         try:
             parser = createParser(filename)
         except InputStreamError as e:
-            logger.log(msg % (filename, ex(e)), logger.WARNING)
+            logger.warning(msg % (filename, ex(e)))
         except (BaseException, Exception) as e:
-            logger.log(msg % (filename, ex(e)), logger.ERROR)
-            logger.log(traceback.format_exc(), logger.ERROR)
+            logger.error(msg % (filename, ex(e)))
+            logger.error(traceback.format_exc())

         if parser:
             extract = None
@@ -385,7 +385,7 @@ class Quality(object):
                     parser.parse_comments = False
                 extract = extractMetadata(parser, **args)
             except (BaseException, Exception) as e:
-                logger.log(msg % (filename, ex(e)), logger.WARNING)
+                logger.warning(msg % (filename, ex(e)))
             if extract:
                 try:
                     height = extract.get('height')

diff --git a/sickgear/config.py b/sickgear/config.py
index 9fb7aa98..c7adc21a 100644
--- a/sickgear/config.py
+++ b/sickgear/config.py
@@ -56,7 +56,7 @@ def change_https_cert(https_cert):
     if os.path.normpath(sickgear.HTTPS_CERT) != os.path.normpath(https_cert):
         if helpers.make_dir(os.path.dirname(os.path.abspath(https_cert))):
             sickgear.HTTPS_CERT = os.path.normpath(https_cert)
-            logger.log(u'Changed https cert path to %s' % https_cert)
+            logger.log(f'Changed https cert path to {https_cert}')
         else:
             return False

@@ -71,7 +71,7 @@ def change_https_key(https_key):
     if os.path.normpath(sickgear.HTTPS_KEY) != os.path.normpath(https_key):
         if helpers.make_dir(os.path.dirname(os.path.abspath(https_key))):
             sickgear.HTTPS_KEY = os.path.normpath(https_key)
-            logger.log(u'Changed https key path to %s' % https_key)
+            logger.log(f'Changed https key path to {https_key}')
         else:
             return False

@@ -89,7 +89,7 @@ def change_log_dir(log_dir, web_log):
             sickgear.LOG_DIR = abs_log_dir
             logger.sb_log_instance.init_logging()
-            logger.log(u'Initialized new log file in %s' % sickgear.LOG_DIR)
+            logger.log(f'Initialized new log file in {sickgear.LOG_DIR}')
             log_dir_changed = True

         else:
@@ -109,7 +109,7 @@ def change_nzb_dir(nzb_dir):
     if os.path.normpath(sickgear.NZB_DIR) != os.path.normpath(nzb_dir):
         if helpers.make_dir(nzb_dir):
             sickgear.NZB_DIR = os.path.normpath(nzb_dir)
-            logger.log(u'Changed NZB folder to %s' % nzb_dir)
+            logger.log(f'Changed NZB folder to {nzb_dir}')
         else:
             return False

@@ -124,7 +124,7 @@ def change_torrent_dir(torrent_dir):
     if os.path.normpath(sickgear.TORRENT_DIR) != os.path.normpath(torrent_dir):
         if helpers.make_dir(torrent_dir):
             sickgear.TORRENT_DIR = os.path.normpath(torrent_dir)
-            logger.log(u'Changed torrent folder to %s' % torrent_dir)
+            logger.log(f'Changed torrent folder to {torrent_dir}')
         else:
             return False

@@ -139,7 +139,7 @@ def change_tv_download_dir(tv_download_dir):
     if os.path.normpath(sickgear.TV_DOWNLOAD_DIR) != os.path.normpath(tv_download_dir):
         if helpers.make_dir(tv_download_dir):
             sickgear.TV_DOWNLOAD_DIR = os.path.normpath(tv_download_dir)
-            logger.log(u'Changed TV download folder to %s' % tv_download_dir)
+            logger.log(f'Changed TV download folder to {tv_download_dir}')
         else:
             return False

@@ -407,7 +407,7 @@ def check_setting_int(config, cfg_name, item_name, def_val):
     except (BaseException, Exception):
         config[cfg_name] = {}
         config[cfg_name][item_name] = my_val
-    logger.log('%s -> %s' % (item_name, my_val), logger.DEBUG)
+    logger.debug('%s -> %s' % (item_name, my_val))
     return my_val

@@ -422,7 +422,7 @@ def check_setting_float(config, cfg_name, item_name, def_val):
         config[cfg_name] = {}
         config[cfg_name][item_name] = my_val

-    logger.log('%s -> %s' % (item_name, my_val), logger.DEBUG)
+    logger.debug('%s -> %s' % (item_name, my_val))
     return my_val

@@ -449,9 +449,9 @@ def check_setting_str(config, cfg_name, item_name, def_val, log=True):
         config[cfg_name][item_name] = helpers.encrypt(my_val, encryption_version)

     if log:
-        logger.log('%s -> %s' % (item_name, my_val), logger.DEBUG)
+        logger.debug('%s -> %s' % (item_name, my_val))
     else:
-        logger.log('%s -> ******' % item_name, logger.DEBUG)
+        logger.debug('%s -> ******' % item_name)

     return (my_val, def_val)['None' == my_val]

@@ -497,9 +497,10 @@ class ConfigMigrator(object):
         if self.config_version > self.expected_config_version:
             logger.log_error_and_exit(
-                u'Your config version (%s) has been incremented past what this version of SickGear supports (%s).\n'
-                'If you have used other forks or a newer version of SickGear, your config file may be unusable due to '
-                'their modifications.' % (self.config_version, self.expected_config_version))
+                f'Your config version ({self.config_version})'
+                f' has been incremented past what this version of SickGear supports ({self.expected_config_version}).\n'
+                f'If you have used other forks or a newer version of SickGear,'
+                f' your config file may be unusable due to their modifications.')

         sickgear.CONFIG_VERSION = self.config_version

@@ -511,20 +512,20 @@ class ConfigMigrator(object):
         else:
             migration_name = ''

-        logger.log(u'Backing up config before upgrade')
+        logger.log('Backing up config before upgrade')
         if not helpers.backup_versioned_file(sickgear.CONFIG_FILE, self.config_version):
-            logger.log_error_and_exit(u'Config backup failed, abort upgrading config')
+            logger.log_error_and_exit('Config backup failed, abort upgrading config')
         else:
-            logger.log(u'Proceeding with upgrade')
+            logger.log('Proceeding with upgrade')

         # do the migration, expect a method named _migrate_v<num>
-        logger.log(u'Migrating config up to version %s %s' % (next_version, migration_name))
+        logger.log(f'Migrating config up to version {next_version} {migration_name}')
         getattr(self, '_migrate_v%s' % next_version)()
         self.config_version = next_version

         # save new config after migration
         sickgear.CONFIG_VERSION = self.config_version
-        logger.log(u'Saving config file to disk')
+        logger.log('Saving config file to disk')
         sickgear.save_config()

     @staticmethod
@@ -569,17 +570,17 @@ class ConfigMigrator(object):
                 new_season_format = str(new_season_format).replace('09', '%0S')
                 new_season_format = new_season_format.replace('9', '%S')

-                logger.log(u'Changed season folder format from %s to %s, prepending it to your naming config' %
-                           (old_season_format, new_season_format))
+                logger.log(f'Changed season folder format from {old_season_format} to {new_season_format},'
+                           f' prepending it to your naming config')
                 sickgear.NAMING_PATTERN = new_season_format + os.sep + sickgear.NAMING_PATTERN

             except (TypeError, ValueError):
-                logger.log(u'Can not change %s to new season format' % old_season_format, logger.ERROR)
+                logger.error(f'Can not change {old_season_format} to new season format')

         # if no shows had it on then don't flatten any shows and don't put season folders in the config
         else:
-            logger.log(u'No shows were using season folders before so I am disabling flattening on all shows')
+            logger.log('No shows were using season folders before so I am disabling flattening on all shows')

             # don't flatten any shows at all
             my_db.action('UPDATE tv_shows SET flatten_folders = 0 WHERE 1=1')
@@ -672,8 +673,7 @@ class ConfigMigrator(object):
             try:
                 name, url, key, enabled = cur_provider_data.split('|')
             except ValueError:
-                logger.log(u'Skipping Newznab provider string: "%s", incorrect format' % cur_provider_data,
-                           logger.ERROR)
+                logger.error(f'Skipping Newznab provider string: "{cur_provider_data}", incorrect format')
                 continue

             cat_ids = '5030,5040,5060'
@@ -727,7 +727,7 @@ class ConfigMigrator(object):
             cur_metadata = metadata.split('|')
             # if target has the old number of values, do upgrade
             if 6 == len(cur_metadata):
-                logger.log(u'Upgrading ' + metadata_name + ' metadata, old value: ' + metadata)
+                logger.log('Upgrading ' + metadata_name + ' metadata, old value: ' + metadata)
                 cur_metadata.insert(4, '0')
                 cur_metadata.append('0')
                 cur_metadata.append('0')
@@ -740,15 +740,15 @@ class ConfigMigrator(object):
                 cur_metadata[4], cur_metadata[3] = cur_metadata[3], '0'
                 # write new format
                 metadata = '|'.join(cur_metadata)
-                logger.log(u'Upgrading %s metadata, new value: %s' % (metadata_name, metadata))
+
logger.log(f'Upgrading {metadata_name} metadata, new value: {metadata}') elif 10 == len(cur_metadata): metadata = '|'.join(cur_metadata) - logger.log(u'Keeping %s metadata, value: %s' % (metadata_name, metadata)) + logger.log(f'Keeping {metadata_name} metadata, value: {metadata}') else: - logger.log(u'Skipping %s: "%s", incorrect format' % (metadata_name, metadata), logger.ERROR) + logger.error(f'Skipping {metadata_name}: "{metadata}", incorrect format') metadata = '0|0|0|0|0|0|0|0|0|0' - logger.log(u'Setting %s metadata, new value: %s' % (metadata_name, metadata)) + logger.log(f'Setting {metadata_name} metadata, new value: {metadata}') return metadata diff --git a/sickgear/databases/mainDB.py b/sickgear/databases/mainDB.py index c51e3108..b5fbcc93 100644 --- a/sickgear/databases/mainDB.py +++ b/sickgear/databases/mainDB.py @@ -86,7 +86,7 @@ class MainSanityCheck(db.DBSanityCheck): if 0 < len(cl): self.connection.mass_action(cl) - logger.log(u'Performing a vacuum on the database.', logger.DEBUG) + logger.debug('Performing a vacuum on the database.') self.connection.upgrade_log(fix_msg % 'VACUUM') self.connection.action('VACUUM') self.connection.upgrade_log(fix_msg % 'finished') @@ -111,8 +111,7 @@ class MainSanityCheck(db.DBSanityCheck): for cur_result in sql_result: - logger.log(u'Duplicate show detected! %s: %s count: %s' % ( - column, cur_result[column], cur_result['count']), logger.DEBUG) + logger.debug(f'Duplicate show detected! {column}: {cur_result[column]} count: {cur_result["count"]}') cur_dupe_results = self.connection.select( 'SELECT show_id, ' + column + ' FROM tv_shows WHERE ' + column + ' = ? LIMIT ?', @@ -121,15 +120,15 @@ class MainSanityCheck(db.DBSanityCheck): cl = [] for cur_dupe_id in cur_dupe_results: - logger.log(u'Deleting duplicate show with %s: %s show_id: %s' % ( - column, cur_dupe_id[column], cur_dupe_id['show_id'])) + logger.log(f'Deleting duplicate show with {column}: {cur_dupe_id[column]}' + f' show_id: {cur_dupe_id["show_id"]}') cl.append(['DELETE FROM tv_shows WHERE show_id = ?', [cur_dupe_id['show_id']]]) if 0 < len(cl): self.connection.mass_action(cl) else: - logger.log(u'No duplicate show, check passed') + logger.log('No duplicate show, check passed') def fix_duplicate_episodes(self): @@ -146,9 +145,9 @@ class MainSanityCheck(db.DBSanityCheck): for cur_result in sql_result: - logger.log(u'Duplicate episode detected! prod_id: %s season: %s episode: %s count: %s' % - (cur_result['prod_id'], cur_result['season'], cur_result['episode'], - cur_result['count']), logger.DEBUG) + logger.debug(f'Duplicate episode detected! 
prod_id: {cur_result["prod_id"]}' + f' season: {cur_result["season"]} episode: {cur_result["episode"]}' + f' count: {cur_result["count"]}') cur_dupe_results = self.connection.select( 'SELECT episode_id' @@ -163,14 +162,14 @@ class MainSanityCheck(db.DBSanityCheck): cl = [] for cur_dupe_id in cur_dupe_results: - logger.log(u'Deleting duplicate episode with episode_id: %s' % cur_dupe_id['episode_id']) - cl.append(['DELETE FROM tv_episodes WHERE episode_id = ?', [cur_dupe_id['episode_id']]]) + logger.log(f'Deleting duplicate episode with episode_id: {cur_dupe_id["episode_id"]}') + cl.append(['DELETE FROM tv_episodes WHERE episode_id = ?', [cur_dupe_id["episode_id"]]]) if 0 < len(cl): self.connection.mass_action(cl) else: - logger.log(u'No duplicate episode, check passed') + logger.log('No duplicate episode, check passed') def fix_orphan_episodes(self): @@ -182,16 +181,16 @@ class MainSanityCheck(db.DBSanityCheck): cl = [] for cur_result in sql_result: - logger.log(u'Orphan episode detected! episode_id: %s showid: %s' % ( - cur_result['episode_id'], cur_result['showid']), logger.DEBUG) - logger.log(u'Deleting orphan episode with episode_id: %s' % cur_result['episode_id']) + logger.debug(f'Orphan episode detected! episode_id: {cur_result["episode_id"]}' + f' showid: {cur_result["showid"]}') + logger.log(f'Deleting orphan episode with episode_id: {cur_result["episode_id"]}') cl.append(['DELETE FROM tv_episodes WHERE episode_id = ?', [cur_result['episode_id']]]) if 0 < len(cl): self.connection.mass_action(cl) else: - logger.log(u'No orphan episodes, check passed') + logger.log('No orphan episodes, check passed') def fix_missing_table_indexes(self): if not self.connection.select('PRAGMA index_info("idx_indexer_id")'): @@ -240,9 +239,9 @@ class MainSanityCheck(db.DBSanityCheck): cl = [] for cur_result in sql_result: - logger.log(u'UNAIRED episode detected! episode_id: %s showid: %s' % ( - cur_result['episode_id'], cur_result['showid']), logger.DEBUG) - logger.log(u'Fixing unaired episode status with episode_id: %s' % cur_result['episode_id']) + logger.debug(f'UNAIRED episode detected! episode_id: {cur_result["episode_id"]}' + f' showid: {cur_result["showid"]}') + logger.log(f'Fixing unaired episode status with episode_id: {cur_result["episode_id"]}') cl.append(['UPDATE tv_episodes SET status = ? WHERE episode_id = ?', [common.UNAIRED, cur_result['episode_id']]]) @@ -250,7 +249,7 @@ class MainSanityCheck(db.DBSanityCheck): self.connection.mass_action(cl) else: - logger.log(u'No UNAIRED episodes, check passed') + logger.log('No UNAIRED episodes, check passed') def fix_scene_exceptions(self): @@ -387,21 +386,17 @@ class InitialSchema(db.SchemaUpgrade): if cur_db_version < MIN_DB_VERSION: logger.log_error_and_exit( - u'Your database version (' + str(cur_db_version) - + ') is too old to migrate from what this version of SickGear supports (' - + str(MIN_DB_VERSION) + ').' + "\n" + f'Your database version ({cur_db_version}) is too old to migrate from' + f' what this version of SickGear supports ({MIN_DB_VERSION}).\n' + 'Upgrade using a previous version (tag) build 496 to build 501 of SickGear' - ' first or remove database file to begin fresh.' 
- ) + ' first or remove database file to begin fresh.') if cur_db_version > MAX_DB_VERSION: logger.log_error_and_exit( - u'Your database version (' + str(cur_db_version) - + ') has been incremented past what this version of SickGear supports (' - + str(MAX_DB_VERSION) + ').\n' + f'Your database version ({cur_db_version}) has been incremented past' + f' what this version of SickGear supports ({MAX_DB_VERSION}).\n' + 'If you have used other forks of SickGear,' - ' your database may be unusable due to their modifications.' - ) + ' your database may be unusable due to their modifications.') return self.call_check_db_version() @@ -423,7 +418,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): sql_result = self.connection.select('SELECT episode_id, location, file_size FROM tv_episodes') - self.upgrade_log(u'Adding file size to all episodes in DB, please be patient') + self.upgrade_log('Adding file size to all episodes in DB, please be patient') for cur_result in sql_result: if not cur_result['location']: continue @@ -439,7 +434,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): # noinspection SqlRedundantOrderingDirection history_sql_result = self.connection.select('SELECT * FROM history WHERE provider != -1 ORDER BY date ASC') - self.upgrade_log(u'Adding release name to all episodes still in history') + self.upgrade_log('Adding release name to all episodes still in history') for cur_result in history_sql_result: # find the associated download, if there isn't one then ignore it # noinspection SqlResolve @@ -449,8 +444,8 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): ' WHERE provider = -1 AND showid = ? AND season = ? AND episode = ? AND date > ?', [cur_result['showid'], cur_result['season'], cur_result['episode'], cur_result['date']]) if not download_sql_result: - self.upgrade_log(u'Found a snatch in the history for ' + cur_result['resource'] - + ' but couldn\'t find the associated download, skipping it', logger.DEBUG) + self.upgrade_log(f'Found a snatch in the history for {cur_result["resource"]}' + f' but couldn\'t find the associated download, skipping it', logger.DEBUG) continue nzb_name = cur_result['resource'] @@ -468,9 +463,8 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): ' WHERE showid = ? AND season = ? AND episode = ? 
AND location != ""', [cur_result['showid'], cur_result['season'], cur_result['episode']]) if not sql_result: - logger.log( - u'The episode ' + nzb_name + ' was found in history but doesn\'t exist on disk anymore, skipping', - logger.DEBUG) + logger.debug(f'The episode {nzb_name} was found in history but doesn\'t exist on disk anymore,' + f' skipping') continue # get the status/quality of the existing ep and make sure it's what we expect @@ -483,7 +477,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): # make sure this is actually a real release name and not a season pack or something for cur_name in (nzb_name, file_name): - logger.log(u'Checking if ' + cur_name + ' is actually a good release name', logger.DEBUG) + logger.debug(f'Checking if {cur_name} is actually a good release name') try: np = NameParser(False) parse_result = np.parse(cur_name) @@ -503,7 +497,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): ' FROM tv_episodes' ' WHERE release_name = ""') - self.upgrade_log(u'Adding release name to all episodes with obvious scene filenames') + self.upgrade_log('Adding release name to all episodes with obvious scene filenames') for cur_result in empty_sql_result: ep_file_name = os.path.basename(cur_result['location']) @@ -522,9 +516,7 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade): if not parse_result.release_group: continue - logger.log( - u'Name ' + ep_file_name + ' gave release group of ' + parse_result.release_group + ', seems valid', - logger.DEBUG) + logger.debug(f'Name {ep_file_name} gave release group of {parse_result.release_group}, seems valid') self.connection.action('UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?', [ep_file_name, cur_result['episode_id']]) @@ -651,7 +643,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): common.Quality.UNKNOWN], []) # update qualities (including templates) - self.upgrade_log(u'[1/4] Updating pre-defined templates and the quality for each show...') + self.upgrade_log('[1/4] Updating pre-defined templates and the quality for each show...') cl = [] shows = self.connection.select('SELECT * FROM tv_shows') for cur_show in shows: @@ -666,7 +658,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): # update status that are are within the old hdwebdl # (1<<3 which is 8) and better -- exclude unknown (1<<15 which is 32768) - self.upgrade_log(u'[2/4] Updating the status for the episodes within each show...') + self.upgrade_log('[2/4] Updating the status for the episodes within each show...') cl = [] sql_result = self.connection.select('SELECT * FROM tv_episodes WHERE status < 3276800 AND status >= 800') for cur_result in sql_result: @@ -678,7 +670,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): # may not always coordinate together # update previous history so it shows the correct action - self.upgrade_log(u'[3/4] Updating history to reflect the correct action...') + self.upgrade_log('[3/4] Updating history to reflect the correct action...') cl = [] # noinspection SqlResolve history_action = self.connection.select('SELECT * FROM history WHERE action < 3276800 AND action >= 800') @@ -688,7 +680,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): self.connection.mass_action(cl) # update previous history so it shows the correct quality - self.upgrade_log(u'[4/4] Updating history to reflect the correct quality...') + self.upgrade_log('[4/4] Updating history to reflect the correct quality...') cl = [] # noinspection SqlResolve history_quality = self.connection.select('SELECT * FROM history WHERE quality < 32768 
AND quality >= 8') @@ -700,7 +692,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade): self.inc_db_version() # cleanup and reduce db if any previous data was removed - self.upgrade_log(u'Performing a vacuum on the database.', logger.DEBUG) + self.upgrade_log('Performing a vacuum on the database.', logger.DEBUG) self.connection.action('VACUUM') return self.call_check_db_version() @@ -712,10 +704,10 @@ class AddShowidTvdbidIndex(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Checking for duplicate shows before adding unique index.') + self.upgrade_log('Checking for duplicate shows before adding unique index.') MainSanityCheck(self.connection).fix_duplicate_shows('tvdb_id') - self.upgrade_log(u'Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.') + self.upgrade_log('Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.') if not self.has_table('idx_showid'): self.connection.action('CREATE INDEX idx_showid ON tv_episodes (showid);') if not self.has_table('idx_tvdb_id'): @@ -732,7 +724,7 @@ class AddLastUpdateTVDB(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'last_update_tvdb'): - self.upgrade_log(u'Adding column last_update_tvdb to tv_shows') + self.upgrade_log('Adding column last_update_tvdb to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'last_update_tvdb', default=1) @@ -745,7 +737,7 @@ class AddDBIncreaseTo15(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version to v%s' % self.call_check_db_version()) + self.upgrade_log(f'Bumping database version to v{self.call_check_db_version()}') self.inc_db_version() return self.call_check_db_version() @@ -756,7 +748,7 @@ class AddIMDbInfo(db.SchemaUpgrade): db_backed_up = False if not self.has_table('imdb_info'): - self.upgrade_log(u'Creating IMDb table imdb_info') + self.upgrade_log('Creating IMDb table imdb_info') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True @@ -766,7 +758,7 @@ class AddIMDbInfo(db.SchemaUpgrade): ' rating TEXT, votes INTEGER, last_update NUMERIC)') if not self.has_column('tv_shows', 'imdb_id'): - self.upgrade_log(u'Adding IMDb column imdb_id to tv_shows') + self.upgrade_log('Adding IMDb column imdb_id to tv_shows') if not db_backed_up: db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) @@ -786,7 +778,7 @@ class AddProperNamingSupport(db.SchemaUpgrade): return self.set_db_version(5816) if not self.has_column('tv_episodes', 'is_proper'): - self.upgrade_log(u'Adding column is_proper to tv_episodes') + self.upgrade_log('Adding column is_proper to tv_episodes') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_episodes', 'is_proper') @@ -805,7 +797,7 @@ class AddEmailSubscriptionTable(db.SchemaUpgrade): return self.set_db_version(5817) if not self.has_column('tv_shows', 'notify_list'): - self.upgrade_log(u'Adding column notify_list to tv_shows') + self.upgrade_log('Adding column notify_list to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'notify_list', 'TEXT', None) @@ -827,7 +819,7 @@ class AddProperSearch(db.SchemaUpgrade): return 
self.set_db_version(5818) if not self.has_column('info', 'last_proper_search'): - self.upgrade_log(u'Adding column last_proper_search to info') + self.upgrade_log('Adding column last_proper_search to info') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('info', 'last_proper_search', default=1) @@ -839,7 +831,7 @@ class AddProperSearch(db.SchemaUpgrade): class AddDvdOrderOption(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'dvdorder'): - self.upgrade_log(u'Adding column dvdorder to tv_shows') + self.upgrade_log('Adding column dvdorder to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'dvdorder', 'NUMERIC', '0') @@ -851,7 +843,7 @@ class AddDvdOrderOption(db.SchemaUpgrade): class AddSubtitlesSupport(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'subtitles'): - self.upgrade_log(u'Adding subtitles to tv_shows and tv_episodes') + self.upgrade_log('Adding subtitles to tv_shows and tv_episodes') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'subtitles') self.add_column('tv_episodes', 'subtitles', 'TEXT', '') @@ -867,10 +859,10 @@ class ConvertTVShowsToIndexerScheme(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Converting TV Shows table to Indexer Scheme...') + self.upgrade_log('Converting TV Shows table to Indexer Scheme...') if self.has_table('tmp_tv_shows'): - self.upgrade_log(u'Removing temp tv show tables left behind from previous updates...') + self.upgrade_log('Removing temp tv show tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_tv_shows') @@ -908,10 +900,10 @@ class ConvertTVEpisodesToIndexerScheme(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Converting TV Episodes table to Indexer Scheme...') + self.upgrade_log('Converting TV Episodes table to Indexer Scheme...') if self.has_table('tmp_tv_episodes'): - self.upgrade_log(u'Removing temp tv episode tables left behind from previous updates...') + self.upgrade_log('Removing temp tv episode tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_tv_episodes') @@ -949,10 +941,10 @@ class ConvertIMDBInfoToIndexerScheme(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Converting IMDb Info table to Indexer Scheme...') + self.upgrade_log('Converting IMDb Info table to Indexer Scheme...') if self.has_table('tmp_imdb_info'): - self.upgrade_log(u'Removing temp imdb info tables left behind from previous updates...') + self.upgrade_log('Removing temp imdb info tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_imdb_info') @@ -978,10 +970,10 @@ class ConvertInfoToIndexerScheme(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Converting Info table to Indexer Scheme...') + self.upgrade_log('Converting Info table to Indexer Scheme...') if self.has_table('tmp_info'): - self.upgrade_log(u'Removing temp info tables left behind from previous updates...') + 
self.upgrade_log('Removing temp info tables left behind from previous updates...') # noinspection SqlResolve self.connection.action('DROP TABLE tmp_info') @@ -1005,7 +997,7 @@ class AddArchiveFirstMatchOption(db.SchemaUpgrade): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) if not self.has_column('tv_shows', 'archive_firstmatch'): - self.upgrade_log(u'Adding column archive_firstmatch to tv_shows') + self.upgrade_log('Adding column archive_firstmatch to tv_shows') self.add_column('tv_shows', 'archive_firstmatch', 'NUMERIC', '0') self.inc_db_version() @@ -1020,7 +1012,7 @@ class AddSceneNumbering(db.SchemaUpgrade): if self.has_table('scene_numbering'): self.connection.action('DROP TABLE scene_numbering') - self.upgrade_log(u'Upgrading table scene_numbering ...') + self.upgrade_log('Upgrading table scene_numbering ...') self.connection.action( 'CREATE TABLE scene_numbering (indexer TEXT, indexer_id INTEGER, season INTEGER, episode INTEGER,' ' scene_season INTEGER, scene_episode INTEGER,' @@ -1036,7 +1028,7 @@ class ConvertIndexerToInteger(db.SchemaUpgrade): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) cl = [] - self.upgrade_log(u'Converting Indexer to Integer ...') + self.upgrade_log('Converting Indexer to Integer ...') cl.append(['UPDATE tv_shows SET indexer = ? WHERE LOWER(indexer) = ?', ['1', 'tvdb']]) cl.append(['UPDATE tv_shows SET indexer = ? WHERE LOWER(indexer) = ?', ['2', 'tvrage']]) cl.append(['UPDATE tv_episodes SET indexer = ? WHERE LOWER(indexer) = ?', ['1', 'tvdb']]) @@ -1060,13 +1052,13 @@ class AddRequireAndIgnoreWords(db.SchemaUpgrade): db_backed_up = False if not self.has_column('tv_shows', 'rls_require_words'): - self.upgrade_log(u'Adding column rls_require_words to tv_shows') + self.upgrade_log('Adding column rls_require_words to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True self.add_column('tv_shows', 'rls_require_words', 'TEXT', '') if not self.has_column('tv_shows', 'rls_ignore_words'): - self.upgrade_log(u'Adding column rls_ignore_words to tv_shows') + self.upgrade_log('Adding column rls_ignore_words to tv_shows') if not db_backed_up: db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'rls_ignore_words', 'TEXT', '') @@ -1080,14 +1072,14 @@ class AddSportsOption(db.SchemaUpgrade): def execute(self): db_backed_up = False if not self.has_column('tv_shows', 'sports'): - self.upgrade_log(u'Adding column sports to tv_shows') + self.upgrade_log('Adding column sports to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True self.add_column('tv_shows', 'sports', 'NUMERIC', '0') if self.has_column('tv_shows', 'air_by_date') and self.has_column('tv_shows', 'sports'): # update sports column - self.upgrade_log(u'[4/4] Updating tv_shows to reflect the correct sports value...') + self.upgrade_log('[4/4] Updating tv_shows to reflect the correct sports value...') if not db_backed_up: db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) cl = [] @@ -1108,7 +1100,7 @@ class AddSceneNumberingToTvEpisodes(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding columns scene_season and scene_episode to tvepisodes') + self.upgrade_log('Adding columns scene_season and scene_episode to tvepisodes') 
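# [editorial aside, not part of the patch] A minimal sketch of the rewrite rule this
# commit applies throughout; `val` is a hypothetical variable:
#     logger.log(u'found %s' % val, logger.DEBUG)    # old py2-era call with a level argument
#     logger.debug(f'found {val}')                   # becomes the dedicated helper + f-string
#     logger.log(u'found %s' % val, logger.ERROR)    # WARNING/ERROR map the same way
#     logger.error(f'found {val}')
# Calls without a level argument keep logger.log() and only drop the u'' prefix.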
self.add_column('tv_episodes', 'scene_season', 'NUMERIC', 'NULL') self.add_column('tv_episodes', 'scene_episode', 'NUMERIC', 'NULL') @@ -1121,7 +1113,7 @@ class AddAnimeTVShow(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding column anime to tv_episodes') + self.upgrade_log('Adding column anime to tv_shows') self.add_column('tv_shows', 'anime', 'NUMERIC', '0') self.inc_db_version() @@ -1133,7 +1125,7 @@ class AddAbsoluteNumbering(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding column absolute_number to tv_episodes') + self.upgrade_log('Adding column absolute_number to tv_episodes') self.add_column('tv_episodes', 'absolute_number', 'NUMERIC', '0') self.inc_db_version() @@ -1145,7 +1137,7 @@ class AddSceneAbsoluteNumbering(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding columns absolute_number and scene_absolute_number to scene_numbering') + self.upgrade_log('Adding columns absolute_number and scene_absolute_number to scene_numbering') self.add_column('scene_numbering', 'absolute_number', 'NUMERIC', '0') self.add_column('scene_numbering', 'scene_absolute_number', 'NUMERIC', '0') @@ -1160,7 +1152,7 @@ class AddAnimeAllowlistBlocklist(db.SchemaUpgrade): cl = [['CREATE TABLE allowlist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)'], ['CREATE TABLE blocklist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)']] - self.upgrade_log(u'Creating tables for anime allow and block lists') + self.upgrade_log('Creating tables for anime allow and block lists') self.connection.mass_action(cl) self.inc_db_version() @@ -1172,7 +1164,7 @@ class AddSceneAbsoluteNumbering2(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding column scene_absolute_number to tv_episodes') + self.upgrade_log('Adding column scene_absolute_number to tv_episodes') self.add_column('tv_episodes', 'scene_absolute_number', 'NUMERIC', '0') self.inc_db_version() @@ -1184,7 +1176,7 @@ class AddXemRefresh(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Creating table xem_refresh') + self.upgrade_log('Creating table xem_refresh') self.connection.action( 'CREATE TABLE xem_refresh (indexer TEXT, indexer_id INTEGER PRIMARY KEY, last_refreshed INTEGER)') @@ -1197,7 +1189,7 @@ class AddSceneToTvShows(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding column scene to tv_shows') + self.upgrade_log('Adding column scene to tv_shows') self.add_column('tv_shows', 'scene', 'NUMERIC', '0') self.inc_db_version() @@ -1212,7 +1204,7 @@ class AddIndexerMapping(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) if self.has_table('indexer_mapping'): self.connection.action('DROP TABLE indexer_mapping') - self.upgrade_log(u'Adding table indexer_mapping') + self.upgrade_log('Adding table indexer_mapping') self.connection.action( 'CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER, mindexer NUMERIC,' ' PRIMARY KEY (indexer_id, indexer))') @@ -1226,11 +1218,11 @@ class AddVersionToTvEpisodes(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection,
'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Adding columns release_group and version to tv_episodes') + self.upgrade_log('Adding columns release_group and version to tv_episodes') self.add_column('tv_episodes', 'release_group', 'TEXT', '') self.add_column('tv_episodes', 'version', 'NUMERIC', '-1') - self.upgrade_log(u'Adding column version to history') + self.upgrade_log('Adding column version to history') self.add_column('history', 'version', 'NUMERIC', '-1') self.inc_db_version() @@ -1242,7 +1234,7 @@ class BumpDatabaseVersion(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version') + self.upgrade_log('Bumping database version') return self.set_db_version(10000) @@ -1252,7 +1244,7 @@ class Migrate41(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version') + self.upgrade_log('Bumping database version') return self.set_db_version(10001) @@ -1267,7 +1259,7 @@ class Migrate43(db.SchemaUpgrade): if self.has_table(table): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True - self.upgrade_log(u'Dropping redundant table tmdb_info') + self.upgrade_log('Dropping redundant table tmdb_info') # noinspection SqlResolve self.connection.action('DROP TABLE [%s]' % table) db_chg = True @@ -1276,7 +1268,7 @@ if not db_backed_up: db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) db_backed_up = True - self.upgrade_log(u'Dropping redundant tmdb_info refs') + self.upgrade_log('Dropping redundant tmdb_info refs') self.drop_columns('tv_shows', 'tmdb_id') db_chg = True @@ -1288,7 +1280,7 @@ self.connection.action('INSERT INTO db_version (db_version) VALUES (0);') if not db_chg: - self.upgrade_log(u'Bumping database version') + self.upgrade_log('Bumping database version') return self.set_db_version(10001) @@ -1298,7 +1290,7 @@ class Migrate4301(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version') + self.upgrade_log('Bumping database version') return self.set_db_version(10002) @@ -1308,7 +1300,7 @@ class Migrate4302(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version') + self.upgrade_log('Bumping database version') return self.set_db_version(10003) @@ -1318,7 +1310,7 @@ class MigrateUpstream(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Migrate SickBeard db v%s into v15' % str(self.call_check_db_version()).replace('58', '')) + self.upgrade_log(f'Migrate SickBeard db v{str(self.call_check_db_version()).replace("58", "")} into v15') return self.set_db_version(15) @@ -1328,7 +1320,7 @@ class SickGearDatabaseVersion(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version to new SickGear standards') + self.upgrade_log('Bumping database version to new SickGear standards') return self.set_db_version(20000) @@ -1338,7 +1330,7 @@ class RemoveDefaultEpStatusFromTvShows(db.SchemaUpgrade): def execute(self):
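# [editorial aside, not part of the patch] The upgrade classes above all follow the
# same shape; a minimal sketch with a hypothetical class name and version number:
#     class ExampleBump(db.SchemaUpgrade):
#         def execute(self):
#             db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
#             self.upgrade_log('Describing the change')  # plain or f-strings after this commit
#             return self.set_db_version(99999)          # hypothetical version number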
db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Dropping redundant column default_ep_status from tv_shows') + self.upgrade_log('Dropping redundant column default_ep_status from tv_shows') self.drop_columns('tv_shows', 'default_ep_status') return self.set_db_version(10000) @@ -1349,7 +1341,7 @@ class RemoveMinorDBVersion(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Dropping redundant column db_minor_version from db_version') + self.upgrade_log('Dropping redundant column db_minor_version from db_version') self.drop_columns('db_version', 'db_minor_version') return self.set_db_version(10001) @@ -1359,7 +1351,7 @@ class RemoveMinorDBVersion(db.SchemaUpgrade): class RemoveMetadataSub(db.SchemaUpgrade): def execute(self): if self.has_column('tv_shows', 'sub_use_sr_metadata'): - self.upgrade_log(u'Dropping redundant column metadata sub') + self.upgrade_log('Dropping redundant column metadata sub') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.drop_columns('tv_shows', 'sub_use_sr_metadata') @@ -1371,10 +1363,10 @@ class DBIncreaseTo20001(db.SchemaUpgrade): def execute(self): db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) - self.upgrade_log(u'Bumping database version to force a backup before new database code') + self.upgrade_log('Bumping database version to force a backup before new database code') self.connection.action('VACUUM') - self.upgrade_log(u'Performed a vacuum on the database', logger.DEBUG) + self.upgrade_log('Performed a vacuum on the database', logger.DEBUG) return self.set_db_version(20001) @@ -1383,7 +1375,7 @@ class DBIncreaseTo20001(db.SchemaUpgrade): class AddTvShowOverview(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'overview'): - self.upgrade_log(u'Adding column overview to tv_shows') + self.upgrade_log('Adding column overview to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'overview', 'TEXT', '') @@ -1394,7 +1386,7 @@ class AddTvShowOverview(db.SchemaUpgrade): class AddTvShowTags(db.SchemaUpgrade): def execute(self): if not self.has_column('tv_shows', 'tag'): - self.upgrade_log(u'Adding tag to tv_shows') + self.upgrade_log('Adding tag to tv_shows') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.add_column('tv_shows', 'tag', 'TEXT', 'Show List') @@ -1410,7 +1402,7 @@ class ChangeMapIndexer(db.SchemaUpgrade): if self.has_table('indexer_mapping'): self.connection.action('DROP TABLE indexer_mapping') - self.upgrade_log(u'Changing table indexer_mapping') + self.upgrade_log('Changing table indexer_mapping') self.connection.action( 'CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER NOT NULL,' ' mindexer NUMERIC, date NUMERIC NOT NULL DEFAULT 0, status INTEGER NOT NULL DEFAULT 0,' @@ -1422,7 +1414,7 @@ class ChangeMapIndexer(db.SchemaUpgrade): self.upgrade_log('Adding last_run_backlog to info') self.add_column('info', 'last_run_backlog', 'NUMERIC', 1) - self.upgrade_log(u'Moving table scene_exceptions from cache.db to sickbeard.db') + self.upgrade_log('Moving table scene_exceptions from cache.db to sickbeard.db') if self.has_table('scene_exceptions_refresh'): self.connection.action('DROP TABLE scene_exceptions_refresh') self.connection.action('CREATE TABLE scene_exceptions_refresh 
(list TEXT PRIMARY KEY, last_refreshed INTEGER)') @@ -1467,7 +1459,7 @@ class ChangeMapIndexer(db.SchemaUpgrade): class AddShowNotFoundCounter(db.SchemaUpgrade): def execute(self): if not self.has_table('tv_shows_not_found'): - self.upgrade_log(u'Adding table tv_shows_not_found') + self.upgrade_log('Adding table tv_shows_not_found') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action( @@ -1482,7 +1474,7 @@ class AddShowNotFoundCounter(db.SchemaUpgrade): class AddFlagTable(db.SchemaUpgrade): def execute(self): if not self.has_table('flags'): - self.upgrade_log(u'Adding table flags') + self.upgrade_log('Adding table flags') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action('CREATE TABLE flags (flag PRIMARY KEY NOT NULL )') @@ -1494,7 +1486,7 @@ class AddFlagTable(db.SchemaUpgrade): class DBIncreaseTo20007(db.SchemaUpgrade): def execute(self): - self.upgrade_log(u'Bumping database version') + self.upgrade_log('Bumping database version') return self.set_db_version(20007) @@ -1517,7 +1509,7 @@ class AddWatched(db.SchemaUpgrade): self.connection.action('VACUUM') if not self.has_table('tv_episodes_watched'): - self.upgrade_log(u'Adding table tv_episodes_watched') + self.upgrade_log('Adding table tv_episodes_watched') db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version()) self.connection.action( @@ -1561,7 +1553,7 @@ class AddIndexerToTables(db.SchemaUpgrade): for t in [(allowtbl, 'show_id'), (blocktbl, 'show_id'), ('history', 'showid'), ('scene_exceptions', 'indexer_id')]: if not self.has_column(t[0], 'indexer'): - self.upgrade_log(u'Adding TV info support to %s table' % t[0]) + self.upgrade_log(f'Adding TV info support to {t[0]} table') self.add_column(t[0], 'indexer') cl = [] for s_id, i in iteritems(show_ids): diff --git a/sickgear/db.py b/sickgear/db.py index 2e70ba16..c1efaed1 100644 --- a/sickgear/db.py +++ b/sickgear/db.py @@ -132,21 +132,21 @@ class DBConnection(object): :return: success, message """ if not db_supports_backup: - logger.log('this python sqlite3 version doesn\'t support backups', logger.DEBUG) + logger.debug('this python sqlite3 version doesn\'t support backups') return False, 'this python sqlite3 version doesn\'t support backups' if not os.path.isdir(target): - logger.log('Backup target invalid', logger.ERROR) + logger.error('Backup target invalid') return False, 'Backup target invalid' target_db = os.path.join(target, (backup_filename, self.filename)[None is backup_filename]) if os.path.exists(target_db): - logger.log('Backup target file already exists', logger.ERROR) + logger.error('Backup target file already exists') return False, 'Backup target file already exists' # noinspection PyUnusedLocal def progress(status, remaining, total): - logger.log('Copied %s of %s pages...' % (total - remaining, total), logger.DEBUG) + logger.debug('Copied %s of %s pages...' 
% (total - remaining, total)) backup_con = None @@ -156,9 +156,9 @@ class DBConnection(object): with backup_con: with db_lock: self.connection.backup(backup_con, progress=progress) - logger.log('%s backup successful' % self.filename, logger.DEBUG) + logger.debug('%s backup successful' % self.filename) except sqlite3.Error as error: - logger.log("Error while taking backup: %s" % ex(error), logger.ERROR) + logger.error("Error while taking backup: %s" % ex(error)) return False, 'Backup failed' finally: if backup_con: @@ -226,8 +226,8 @@ class DBConnection(object): self.connection.commit() if 0 < affected: - logger.debug(u'Transaction with %s queries executed affected at least %i row%s' % ( - len(queries), affected, helpers.maybe_plural(affected))) + logger.debug(f'Transaction with {len(queries)} queries executed affected at least {affected:d}' + f' row{helpers.maybe_plural(affected)}') return sql_result except sqlite3.OperationalError as e: sql_result = [] @@ -239,7 +239,7 @@ class DBConnection(object): except sqlite3.DatabaseError as e: if self.connection: self.connection.rollback() - logger.error(u'Fatal error executing query: ' + ex(e)) + logger.error(f'Fatal error executing query: {ex(e)}') raise return sql_result @@ -248,10 +248,10 @@ class DBConnection(object): def action_error(e): if 'unable to open database file' in e.args[0] or 'database is locked' in e.args[0]: - logger.log(u'DB error: ' + ex(e), logger.WARNING) + logger.warning(f'DB error: {ex(e)}') time.sleep(1) return True - logger.log(u'DB error: ' + ex(e), logger.ERROR) + logger.error(f'DB error: {ex(e)}') def action(self, query, args=None): # type: (AnyStr, Optional[List, Tuple]) -> Optional[Union[List, sqlite3.Cursor]] @@ -280,7 +280,7 @@ class DBConnection(object): raise attempt += 1 except sqlite3.DatabaseError as e: - logger.log(u'Fatal error executing query: ' + ex(e), logger.ERROR) + logger.error(f'Fatal error executing query: {ex(e)}') raise return sql_result @@ -424,7 +424,7 @@ class DBSanityCheck(object): def upgrade_database(connection, schema): - logger.log(u'Checking database structure...', logger.MESSAGE) + logger.log('Checking database structure...', logger.MESSAGE) connection.is_upgrading = False connection.new_db = 0 == connection.check_db_version() _process_upgrade(connection, schema) @@ -438,16 +438,16 @@ def _pretty_name(class_name): def _restore_database(filename, version): - logger.log(u'Restoring database before trying upgrade again') + logger.log('Restoring database before trying upgrade again') if not sickgear.helpers.restore_versioned_file(db_filename(filename=filename, suffix='v%s' % version), version): - logger.log_error_and_exit(u'Database restore failed, abort upgrading database') + logger.log_error_and_exit('Database restore failed, abort upgrading database') return False return True def _process_upgrade(connection, upgrade_class): instance = upgrade_class(connection) - logger.log('Checking %s database upgrade' % _pretty_name(upgrade_class.__name__), logger.DEBUG) + logger.debug('Checking %s database upgrade' % _pretty_name(upgrade_class.__name__)) if not instance.test(): connection.is_upgrading = True connection.upgrade_log(getattr(upgrade_class, 'pretty_name', None) or _pretty_name(upgrade_class.__name__)) @@ -471,9 +471,9 @@ def _process_upgrade(connection, upgrade_class): else: logger.log_error_and_exit('Database upgrade failed, can\'t determine old db version, not restoring.') - logger.log('%s upgrade completed' % upgrade_class.__name__, logger.DEBUG) + logger.debug('%s upgrade completed' % 
upgrade_class.__name__) else: - logger.log('%s upgrade not required' % upgrade_class.__name__, logger.DEBUG) + logger.debug('%s upgrade not required' % upgrade_class.__name__) for upgradeSubClass in upgrade_class.__subclasses__(): _process_upgrade(connection, upgradeSubClass) @@ -710,15 +710,15 @@ def migration_code(my_db): db_version = my_db.check_db_version() my_db.new_db = 0 == db_version - logger.log(u'Detected database version: v%s' % db_version, logger.DEBUG) + logger.debug(f'Detected database version: v{db_version}') if not (db_version in schema): if db_version == sickgear.mainDB.MAX_DB_VERSION: - logger.log(u'Database schema is up-to-date, no upgrade required') + logger.log('Database schema is up-to-date, no upgrade required') elif 10000 > db_version: - logger.log_error_and_exit(u'SickGear does not currently support upgrading from this database version') + logger.log_error_and_exit('SickGear does not currently support upgrading from this database version') else: - logger.log_error_and_exit(u'Invalid database version') + logger.log_error_and_exit('Invalid database version') else: @@ -733,13 +733,13 @@ def migration_code(my_db): cleanup_old_db_backups(my_db.filename) except (BaseException, Exception) as e: my_db.close() - logger.log(u'Failed to update database with error: %s attempting recovery...' % ex(e), logger.ERROR) + logger.error(f'Failed to update database with error: {ex(e)} attempting recovery...') if _restore_database(my_db.filename, db_version): # initialize the main SB database - logger.log_error_and_exit(u'Successfully restored database version: %s' % db_version) + logger.log_error_and_exit(f'Successfully restored database version: {db_version}') else: - logger.log_error_and_exit(u'Failed to restore database version: %s' % db_version) + logger.log_error_and_exit(f'Failed to restore database version: {db_version}') my_db.upgrade_log('Finished') @@ -765,11 +765,11 @@ def backup_database(db_connection, filename, version): logger.debug('new db, no backup required') return - logger.log(u'Backing up database before upgrade') + logger.log('Backing up database before upgrade') if not sickgear.helpers.backup_versioned_file(db_filename(filename), version): - logger.log_error_and_exit(u'Database backup failed, abort upgrading database') + logger.log_error_and_exit('Database backup failed, abort upgrading database') else: - logger.log(u'Proceeding with upgrade') + logger.log('Proceeding with upgrade') def get_rollback_module(): @@ -836,7 +836,7 @@ def backup_all_dbs(target, compress=True, prefer_7z=True): :return: success, message """ if not make_path(target): - logger.log('Failed to create db backup dir', logger.ERROR) + logger.error('Failed to create db backup dir') return False, 'Failed to create db backup dir' my_db = DBConnection('cache.db') last_backup = my_db.select('SELECT time FROM lastUpdate WHERE provider = ?', ['sickgear_db_backup']) diff --git a/sickgear/failedProcessor.py b/sickgear/failedProcessor.py index e1e6a40b..7af0483b 100644 --- a/sickgear/failedProcessor.py +++ b/sickgear/failedProcessor.py @@ -67,30 +67,33 @@ class FailedProcessor(LegacyFailedProcessor): :return: success :type: bool or None """ - self._log(u'Failed download detected: (%s, %s)' % (self.nzb_name, self.dir_name)) + self._log(f'Failed download detected: ({self.nzb_name}, {self.dir_name})') release_name = show_name_helpers.determine_release_name(self.dir_name, self.nzb_name) if None is release_name: - self._log(u'Warning: unable to find a valid release name.', logger.WARNING) + self._log('Warning: 
unable to find a valid release name.', logger.WARNING) raise exceptions_helper.FailedProcessingFailed() try: parser = NameParser(False, show_obj=self.show_obj, convert=True) parsed = parser.parse(release_name) except InvalidNameException: - self._log(u'Error: release name is invalid: ' + release_name, logger.DEBUG) + self._log(f'Error: release name is invalid: {release_name}', logger.DEBUG) raise exceptions_helper.FailedProcessingFailed() except InvalidShowException: - self._log(u'Error: unable to parse release name %s into a valid show' % release_name, logger.DEBUG) + self._log(f'Error: unable to parse release name {release_name} into a valid show', logger.DEBUG) raise exceptions_helper.FailedProcessingFailed() - logger.log(u"name_parser info: ", logger.DEBUG) - logger.log(u" - " + str(parsed.series_name), logger.DEBUG) - logger.log(u" - " + str(parsed.season_number), logger.DEBUG) - logger.log(u" - " + str(parsed.episode_numbers), logger.DEBUG) - logger.log(u" - " + str(parsed.extra_info), logger.DEBUG) - logger.log(u" - " + str(parsed.release_group), logger.DEBUG) - logger.log(u" - " + str(parsed.air_date), logger.DEBUG) + for cur_msg in ( + 'name_parser info: ', + f' - {parsed.series_name}', + f' - {parsed.season_number}', + f' - {parsed.episode_numbers}', + f' - {parsed.extra_info}', + f' - {parsed.release_group}', + f' - {parsed.air_date}' + ): + logger.debug(cur_msg) for episode in parsed.episode_numbers: segment = parsed.show_obj.get_episode(parsed.season_number, episode) diff --git a/sickgear/failed_history.py b/sickgear/failed_history.py index 8edc87fe..9afa6884 100644 --- a/sickgear/failed_history.py +++ b/sickgear/failed_history.py @@ -99,21 +99,20 @@ def add_failed(release): sql_result = db_select('SELECT * FROM history t WHERE t.release=?', [release]) if not any(sql_result): - logger.log('Release not found in failed.db snatch history', logger.WARNING) + logger.warning('Release not found in failed.db snatch history') elif 1 < len(sql_result): - logger.log('Multiple logged snatches found for release in failed.db', logger.WARNING) + logger.warning('Multiple logged snatches found for release in failed.db') sizes = len(set([x['size'] for x in sql_result])) providers = len(set([x['provider'] for x in sql_result])) if 1 == sizes: - logger.log('However, they\'re all the same size. Continuing with found size', logger.WARNING) + logger.warning('However, they\'re all the same size. Continuing with found size') size = sql_result[0]['size'] else: - logger.log( - 'They also vary in size. Deleting logged snatches and recording this release with no size/provider', - logger.WARNING) + logger.warning( + 'They also vary in size. Deleting logged snatches and recording this release with no size/provider') for cur_result in sql_result: remove_snatched(cur_result['release'], cur_result['size'], cur_result['provider']) @@ -165,7 +164,7 @@ def set_episode_failed(ep_obj): ep_obj.save_to_db() except EpisodeNotFoundException as e: - logger.log('Unable to get episode, please set its status manually: %s' % ex(e), logger.WARNING) + logger.warning('Unable to get episode, please set its status manually: %s' % ex(e)) def remove_failed(release): @@ -237,13 +236,13 @@ def revert_episode(ep_obj): else: status_revert = WANTED - logger.log('Episode not found in failed.db history. Setting it to WANTED', logger.WARNING) + logger.warning('Episode not found in failed.db history. 
Setting it to WANTED') ep_obj.status = status_revert ep_obj.save_to_db() except EpisodeNotFoundException as e: - logger.log('Unable to create episode, please set its status manually: %s' % ex(e), logger.WARNING) + logger.warning('Unable to create episode, please set its status manually: %s' % ex(e)) def find_old_status(ep_obj): @@ -289,8 +288,7 @@ def find_release(ep_obj): db_action('DELETE FROM history WHERE %s=? AND %s!=?' % ('`release`', '`date`'), [release, r['date']]) # Found a previously failed release - logger.log('Found failed.db history release %sx%s: [%s]' % ( - ep_obj.season, ep_obj.episode, release), logger.DEBUG) + logger.debug(f'Found failed.db history release {ep_obj.season}x{ep_obj.episode}: [{release}]') else: release = None provider = None diff --git a/sickgear/generic_queue.py b/sickgear/generic_queue.py index d57ebcbe..11c77bbe 100644 --- a/sickgear/generic_queue.py +++ b/sickgear/generic_queue.py @@ -89,7 +89,7 @@ class GenericQueue(object): my_db = db.DBConnection('cache.db') my_db.mass_action(cl) except (BaseException, Exception) as e: - logger.log('Exception saving queue %s to db: %s' % (self.__class__.__name__, ex(e)), logger.ERROR) + logger.error('Exception saving queue %s to db: %s' % (self.__class__.__name__, ex(e))) def _clear_sql(self): # type: (...) -> List[List] @@ -103,7 +103,7 @@ class GenericQueue(object): my_db = db.DBConnection('cache.db') my_db.mass_action(item_sql) except (BaseException, Exception) as e: - logger.log('Exception saving item %s to db: %s' % (item, ex(e)), logger.ERROR) + logger.error('Exception saving item %s to db: %s' % (item, ex(e))) def delete_item(self, item, finished_run=False): # type: (Union[QueueItem, CastQueueItem], bool) -> None @@ -119,7 +119,7 @@ class GenericQueue(object): my_db = db.DBConnection('cache.db') my_db.mass_action(item_sql) except (BaseException, Exception) as e: - logger.log('Exception deleting item %s from db: %s' % (item, ex(e)), logger.ERROR) + logger.error('Exception deleting item %s from db: %s' % (item, ex(e))) def _get_item_sql(self, item): # type: (Union[QueueItem, CastQueueItem]) -> List[List] @@ -211,12 +211,12 @@ class GenericQueue(object): my_db.mass_action(del_main_sql) def pause(self): - logger.log(u'Pausing queue') + logger.log('Pausing queue') if self.lock: self.min_priority = 999999999999 def unpause(self): - logger.log(u'Unpausing queue') + logger.log('Unpausing queue') with self.lock: self.min_priority = 0 @@ -258,7 +258,7 @@ class GenericQueue(object): if 0 == len(self.events[event_type]): del self.events[event_type] except (BaseException, Exception) as e: - logger.log('Error removing event method from queue: %s' % ex(e), logger.ERROR) + logger.error('Error removing event method from queue: %s' % ex(e)) def execute_events(self, event_type, *args, **kwargs): # type: (int, Tuple, Dict) -> None @@ -267,7 +267,7 @@ class GenericQueue(object): try: event(*args, **kwargs) except (BaseException, Exception) as e: - logger.log('Error executing Event: %s' % ex(e), logger.ERROR) + logger.error('Error executing Event: %s' % ex(e)) def run(self): diff --git a/sickgear/helpers.py b/sickgear/helpers.py index 3fc8c499..4cb670a3 100644 --- a/sickgear/helpers.py +++ b/sickgear/helpers.py @@ -345,7 +345,7 @@ def list_media_files(path): result = [] if path: if [direntry for direntry in scantree(path, include=[r'\.sickgearignore'], filter_kind=False, recurse=False)]: - logger.log('Skipping folder "%s" because it contains ".sickgearignore"' % path, logger.DEBUG) + logger.debug('Skipping folder "%s" because it 
contains ".sickgearignore"' % path) else: result = [direntry.path for direntry in scantree(path, exclude=['Extras'], filter_kind=False) if has_media_ext(direntry.name)] @@ -405,8 +405,7 @@ def hardlink_file(src_file, dest_file): link(src_file, dest_file) fix_set_group_id(dest_file) except (BaseException, Exception) as e: - logger.log(u"Failed to create hardlink of %s at %s: %s. Copying instead." % (src_file, dest_file, ex(e)), - logger.ERROR) + logger.error(f'Failed to create hardlink of {src_file} at {dest_file}: {ex(e)}. Copying instead.') copy_file(src_file, dest_file) @@ -441,7 +440,7 @@ def move_and_symlink_file(src_file, dest_file): fix_set_group_id(dest_file) symlink(dest_file, src_file) except (BaseException, Exception): - logger.log(u"Failed to create symlink of %s at %s. Copying instead" % (src_file, dest_file), logger.ERROR) + logger.error(f'Failed to create symlink of {src_file} at {dest_file}. Copying instead') copy_file(src_file, dest_file) @@ -488,10 +487,10 @@ def rename_ep_file(cur_path, new_path, old_path_length=0): # move the file try: - logger.log(u'Renaming file from %s to %s' % (cur_path, new_path)) + logger.log(f'Renaming file from {cur_path} to {new_path}') shutil.move(cur_path, new_path) except (OSError, IOError) as e: - logger.log(u"Failed renaming " + cur_path + " to " + new_path + ": " + ex(e), logger.ERROR) + logger.error(f'Failed renaming {cur_path} to {new_path}: {ex(e)}') return False # clean up any old folders that are empty @@ -513,7 +512,7 @@ def delete_empty_folders(check_empty_dir, keep_dir=None): # treat check_empty_dir as empty when it only contains these items ignore_items = [] - logger.log(u"Trying to clean any empty folders under " + check_empty_dir) + logger.log(f'Trying to clean any empty folders under {check_empty_dir}') # as long as the folder exists and doesn't contain any files, delete it while os.path.isdir(check_empty_dir) and check_empty_dir != keep_dir: @@ -523,13 +522,13 @@ def delete_empty_folders(check_empty_dir, keep_dir=None): [check_file in ignore_items for check_file in check_files])): # directory is empty or contains only ignore_items try: - logger.log(u"Deleting empty folder: " + check_empty_dir) + logger.log(f"Deleting empty folder: {check_empty_dir}") # need shutil.rmtree when ignore_items is really implemented os.rmdir(check_empty_dir) # do a Synology library update notifiers.NotifierFactory().get('SYNOINDEX').deleteFolder(check_empty_dir) except OSError as e: - logger.log(u"Unable to delete " + check_empty_dir + ": " + repr(e) + " / " + ex(e), logger.WARNING) + logger.warning(f'Unable to delete {check_empty_dir}: {repr(e)} / {ex(e)}') break check_empty_dir = os.path.dirname(check_empty_dir) else: @@ -559,9 +558,7 @@ def get_absolute_number_from_season_and_episode(show_obj, season, episode): if 1 == len(sql_result): absolute_number = int(sql_result[0]["absolute_number"]) - logger.log( - "Found absolute_number:" + str(absolute_number) + " by " + str(season) + "x" + str(episode), - logger.DEBUG) + logger.debug(f'Found absolute_number:{absolute_number} by {season}x{episode}') else: logger.debug('No entries for absolute number in show: %s found using %sx%s' % (show_obj.unique_name, str(season), str(episode))) @@ -600,7 +597,7 @@ def sanitize_scene_name(name): :rtype: AnyStr """ if name: - bad_chars = u',:()£\'!?\u2019' + bad_chars = ',:()£\'!?\u2019' # strip out any bad chars name = re.sub(r'[%s]' % bad_chars, '', name, flags=re.U) @@ -654,7 +651,7 @@ def parse_xml(data, del_xmlns=False): try: parsed_xml = etree.fromstring(data) 
except (BaseException, Exception) as e: - logger.log(u"Error trying to parse xml data. Error: " + ex(e), logger.DEBUG) + logger.debug(f"Error trying to parse xml data. Error: {ex(e)}") parsed_xml = None return parsed_xml @@ -686,28 +683,28 @@ def backup_versioned_file(old_file, version): except (BaseException, Exception): if os.path.isfile(new_file): continue - logger.log('could not rename old backup db file', logger.WARNING) + logger.warning('could not rename old backup db file') if not changed_old_db: raise Exception('can\'t create a backup of db') while not os.path.isfile(new_file): if not os.path.isfile(old_file) or 0 == get_size(old_file): - logger.log(u'No need to create backup', logger.DEBUG) + logger.debug('No need to create backup') break try: - logger.log(u'Trying to back up %s to %s' % (old_file, new_file), logger.DEBUG) + logger.debug(f'Trying to back up {old_file} to {new_file}') shutil.copy(old_file, new_file) - logger.log(u'Backup done', logger.DEBUG) + logger.debug('Backup done') break except (BaseException, Exception) as e: - logger.log(u'Error while trying to back up %s to %s : %s' % (old_file, new_file, ex(e)), logger.WARNING) + logger.warning(f'Error while trying to back up {old_file} to {new_file}: {ex(e)}') num_tries += 1 time.sleep(3) - logger.log(u'Trying again.', logger.DEBUG) + logger.debug('Trying again.') if 3 <= num_tries: - logger.log(u'Unable to back up %s to %s please do it manually.' % (old_file, new_file), logger.ERROR) + logger.error(f'Unable to back up {old_file} to {new_file}, please do it manually.') return False return True @@ -729,39 +726,34 @@ def restore_versioned_file(backup_file, version): restore_file = new_file + '.' + 'v' + str(version) if not os.path.isfile(new_file): - logger.log(u"Not restoring, " + new_file + " doesn't exist", logger.DEBUG) + logger.debug(f'Not restoring, {new_file} doesn\'t exist') return False try: - logger.log( - u"Trying to backup " + new_file + " to " + new_file + "." + "r" + str(version) + " before restoring backup", - logger.DEBUG) + logger.debug(f'Trying to backup {new_file} to {new_file}.r{version} before restoring backup') shutil.move(new_file, new_file + '.'
+ 'r' + str(version)) except (BaseException, Exception) as e: - logger.log( - u"Error while trying to backup DB file " + restore_file + " before proceeding with restore: " + ex(e), - logger.WARNING) + logger.warning(f'Error while trying to backup DB file {restore_file} before proceeding with restore: {ex(e)}') return False while not os.path.isfile(new_file): if not os.path.isfile(restore_file): - logger.log(u"Not restoring, " + restore_file + " doesn't exist", logger.DEBUG) + logger.debug(f'Not restoring, {restore_file} doesn\'t exist') break try: - logger.log(u"Trying to restore " + restore_file + " to " + new_file, logger.DEBUG) + logger.debug(f'Trying to restore {restore_file} to {new_file}') shutil.copy(restore_file, new_file) - logger.log(u"Restore done", logger.DEBUG) + logger.debug('Restore done') break except (BaseException, Exception) as e: - logger.log(u"Error while trying to restore " + restore_file + ": " + ex(e), logger.WARNING) + logger.warning(f'Error while trying to restore {restore_file}: {ex(e)}') num_tries += 1 time.sleep(1) - logger.log(u"Trying again.", logger.DEBUG) + logger.debug('Trying again.') if 10 <= num_tries: - logger.log(u"Unable to restore " + restore_file + " to " + new_file + " please do it manually.", - logger.ERROR) + logger.error(f'Unable to restore {restore_file} to {new_file}, please do it manually.') return False return True @@ -963,7 +955,7 @@ def get_show(name, try_scene_exceptions=False): if tvid and prodid: show_obj = find_show_by_id({tvid: prodid}) except (BaseException, Exception) as e: - logger.log(u'Error when attempting to find show: ' + name + ' in SickGear: ' + ex(e), logger.DEBUG) + logger.debug(f'Error when attempting to find show: {name} in SickGear: {ex(e)}') return show_obj @@ -1051,8 +1043,8 @@ def clear_cache(force=False): except OSError: dirty = True - logger.log(u'%s from cache folder %s' % ((('Found items not removed', 'Found items removed')[not dirty], 'No items found to remove')[None is dirty], sickgear.CACHE_DIR)) + logger.log( f'{(("Found items not removed", "Found items removed")[not dirty], "No items found to remove")[None is dirty]}' f' from cache folder {sickgear.CACHE_DIR}') def human(size): @@ -1298,7 +1291,7 @@ def make_search_segment_html_string(segment, max_eps=5): segment = [segment] if segment and len(segment) > max_eps: seasons = [x for x in set([x.season for x in segment])] - seg_str = u'Season%s: ' % maybe_plural(len(seasons)) + seg_str = f'Season{maybe_plural(len(seasons))}: ' divider = '' for x in seasons: eps = [str(s.episode) for s in segment if x == s.season] @@ -1308,7 +1301,7 @@ divider = ', ' elif segment: episode_numbers = ['S%sE%s' % (str(x.season).zfill(2), str(x.episode).zfill(2)) for x in segment] - seg_str = u'Episode%s: %s' % (maybe_plural(len(episode_numbers)), ', '.join(episode_numbers)) + seg_str = f'Episode{maybe_plural(len(episode_numbers))}: {", ".join(episode_numbers)}' return seg_str @@ -1394,7 +1387,7 @@ def should_delete_episode(status): s = Quality.split_composite_status(status)[0] if s not in SNATCHED_ANY + [DOWNLOADED, ARCHIVED, IGNORED]: return True - logger.log('not safe to delete episode from db because of status: %s' % statusStrings[s], logger.DEBUG) + logger.debug('not safe to delete episode from db because of status: %s' % statusStrings[s]) return False @@ -1573,7 +1566,7 @@ def count_files_dirs(base_dir): try: files = scandir(base_dir) except OSError as e: - logger.log('Unable to count files %s / %s' % (repr(e),
ex(e)), logger.WARNING) + logger.warning('Unable to count files %s / %s' % (repr(e), ex(e))) else: for e in files: if e.is_file(): @@ -1643,8 +1636,8 @@ def upgrade_new_naming(): try: move_file(entry.path, new_name) except (BaseException, Exception) as e: - logger.log('Unable to rename %s to %s: %s / %s' - % (entry.path, new_name, repr(e), ex(e)), logger.WARNING) + logger.warning('Unable to rename %s to %s: %s / %s' + % (entry.path, new_name, repr(e), ex(e))) else: # clean up files without reference in db try: @@ -1664,7 +1657,7 @@ def upgrade_new_naming(): try: entries = scandir(entry.path) except OSError as e: - logger.log('Unable to stat dirs %s / %s' % (repr(e), ex(e)), logger.WARNING) + logger.warning('Unable to stat dirs %s / %s' % (repr(e), ex(e))) continue for d_entry in entries: if d_entry.is_dir(): @@ -1679,14 +1672,13 @@ def upgrade_new_naming(): try: move_file(d_entry.path, new_dir_name) except (BaseException, Exception) as e: - logger.log('Unable to rename %s to %s: %s / %s' % - (d_entry.path, new_dir_name, repr(e), ex(e)), logger.WARNING) + logger.warning(f'Unable to rename {d_entry.path} to {new_dir_name}:' + f' {repr(e)} / {ex(e)}') if os.path.isdir(new_dir_name): try: f_n = filter(lambda fn: fn.is_file(), scandir(new_dir_name)) except OSError as e: - logger.log('Unable to rename %s / %s' % (repr(e), ex(e)), - logger.WARNING) + logger.warning('Unable to rename %s / %s' % (repr(e), ex(e))) else: rename_args = [] # noinspection PyTypeChecker @@ -1697,8 +1689,8 @@ def upgrade_new_naming(): try: move_file(*args) except (BaseException, Exception) as e: - logger.log('Unable to rename %s to %s: %s / %s' % - (args[0], args[1], repr(e), ex(e)), logger.WARNING) + logger.warning(f'Unable to rename {args[0]} to {args[1]}:' + f' {repr(e)} / {ex(e)}') else: try: shutil.rmtree(d_entry.path) @@ -1754,11 +1746,11 @@ def normalise_chars(text): :return: Text with entities replaced :rtype: AnyStr """ - result = text.replace(u'\u2010', u'-').replace(u'\u2011', u'-').replace(u'\u2012', u'-') \ - .replace(u'\u2013', u'-').replace(u'\u2014', u'-').replace(u'\u2015', u'-') \ - .replace(u'\u2018', u"'").replace(u'\u2019', u"'") \ - .replace(u'\u201c', u'\"').replace(u'\u201d', u'\"') \ - .replace(u'\u0020', u' ').replace(u'\u00a0', u' ') + result = text.replace('\u2010', '-').replace('\u2011', '-').replace('\u2012', '-') \ + .replace('\u2013', '-').replace('\u2014', '-').replace('\u2015', '-') \ + .replace('\u2018', "'").replace('\u2019', "'") \ + .replace('\u201c', '\"').replace('\u201d', '\"') \ + .replace('\u0020', ' ').replace('\u00a0', ' ') return result diff --git a/sickgear/image_cache.py b/sickgear/image_cache.py index f0372358..10fe08da 100644 --- a/sickgear/image_cache.py +++ b/sickgear/image_cache.py @@ -277,9 +277,9 @@ class ImageCache(object): result = [] for filename in glob.glob(image_file): result.append(os.path.isfile(filename) and filename) - logger.log(u'Found cached %s' % filename, logger.DEBUG) + logger.debug(f'Found cached {filename}') - not any(result) and logger.log(u'No cache for %s' % image_file, logger.DEBUG) + not any(result) and logger.debug(f'No cache for {image_file}') return any(result) def has_poster(self, tvid, prodid): @@ -365,7 +365,7 @@ class ImageCache(object): :param is_binary: is data instead of path """ if not is_binary and not os.path.isfile(image): - logger.warning(u'File not found to determine image type of %s' % image) + logger.warning(f'File not found to determine image type of {image}') return if not image: logger.warning('No Image Data to determinate 
image type') @@ -381,7 +381,7 @@ class ImageCache(object): img_parser.parse_photoshop_content = False img_metadata = extractMetadata(img_parser) except (BaseException, Exception) as e: - logger.debug(u'Unable to extract metadata from %s, not using file. Error: %s' % (image, ex(e))) + logger.debug(f'Unable to extract metadata from {image}, not using file. Error: {ex(e)}') return if not img_metadata: @@ -389,7 +389,7 @@ class ImageCache(object): msg = 'Image Data' else: msg = image - logger.debug(u'Unable to extract metadata from %s, not using file' % msg) + logger.debug(f'Unable to extract metadata from {msg}, not using file') return width = img_metadata.get('width') @@ -441,9 +441,9 @@ class ImageCache(object): logger.debug(msg_success % 'fanart') return self.FANART - logger.warning(u'Skipped image with fanart aspect ratio but less than 500 pixels wide') + logger.warning('Skipped image with fanart aspect ratio but less than 500 pixels wide') else: - logger.warning(u'Skipped image with useless ratio %s' % img_ratio) + logger.warning(f'Skipped image with useless ratio {img_ratio}') def should_refresh(self, image_type=None, provider='local'): # type: (int, Optional[AnyStr]) -> bool @@ -522,13 +522,13 @@ class ImageCache(object): dest_path = self.fanart_path(*id_args + (prefix,)).replace('.fanart.jpg', '.%s.fanart.jpg' % crc) fanart_dir = [self._fanart_dir(*id_args)] else: - logger.log(u'Invalid cache image type: ' + str(img_type), logger.ERROR) + logger.error(f'Invalid cache image type: {img_type}') return False for cache_dir in [self.shows_dir, self._thumbnails_dir(*id_args)] + fanart_dir: sg_helpers.make_path(cache_dir) - logger.log(u'%sing from %s to %s' % (('Copy', 'Mov')[move_file], image_path, dest_path)) + logger.log(f'{("Copy", "Mov")[move_file]}ing from {image_path} to {dest_path}') # copy poster, banner as thumb, even if moved we need to duplicate the images if img_type in (self.POSTER, self.BANNER) and dest_thumb_path: sg_helpers.copy_file(image_path, dest_thumb_path) @@ -574,7 +574,7 @@ class ImageCache(object): img_type_name = 'banner_thumb' dest_path = self.banner_thumb_path(*arg_tvid_prodid) else: - logger.log(u'Invalid cache image type: ' + str(img_type), logger.ERROR) + logger.error(f'Invalid cache image type: {img_type}') return False # retrieve the image from TV info source using the generic metadata class @@ -625,10 +625,9 @@ class ImageCache(object): if num_files > max_files: break total = len(glob.glob(dest_path)) - logger.log(u'Saved %s fanart images%s. Cached %s of max %s fanart file%s' - % (success, - ('', ' from ' + ', '.join([x for x in list(set(sources))]))[0 < len(sources)], - total, sickgear.FANART_LIMIT, sg_helpers.maybe_plural(total))) + logger.log(f'Saved {success} fanart images' + f'{("", " from " + ", ".join([x for x in list(set(sources))]))[0 < len(sources)]}.' + f' Cached {total} of max {sickgear.FANART_LIMIT} fanart file{sg_helpers.maybe_plural(total)}') return bool(success) image_urls = metadata_generator.retrieve_show_image(img_type_name, show_obj, return_links=True, @@ -656,7 +655,7 @@ class ImageCache(object): break if result: - logger.log(u'Saved image type %s' % img_type_name) + logger.log(f'Saved image type {img_type_name}') return result def fill_cache(self, show_obj, force=False): @@ -683,7 +682,7 @@ class ImageCache(object): self.BANNER_THUMB: not self.has_banner_thumbnail(*arg_tvid_prodid) or force} if not any(itervalues(need_images)): - logger.log(u'%s: No new cache images needed. Done.' 
% show_obj.tvid_prodid) + logger.log(f'{show_obj.tvid_prodid}: No new cache images needed. Done.') return show_infos = GenericMetadata.gen_show_infos_dict(show_obj) @@ -698,7 +697,7 @@ class ImageCache(object): del (sickgear.FANART_RATINGS[show_obj.tvid_prodid]) result = sg_helpers.remove_file(cache_dir, tree=True) if result: - logger.log(u'%s cache file %s' % (result, cache_dir), logger.DEBUG) + logger.debug(f'{result} cache file {cache_dir}') try: checked_files = [] @@ -718,7 +717,7 @@ class ImageCache(object): if 0 == len(needed): break - logger.log(u'Checking for images from optional %s metadata' % cur_provider.name, logger.DEBUG) + logger.debug(f'Checking for images from optional {cur_provider.name} metadata') for all_meta_provs, path_file in needed: checked_files += [path_file] @@ -735,9 +734,10 @@ class ImageCache(object): if None is cur_file_type: continue - logger.log(u'Checking if image %s (type %s needs metadata: %s)' - % (cache_file_name, str(cur_file_type), - ('No', 'Yes')[True is need_images[cur_file_type]]), logger.DEBUG) + logger.debug(f'Checking if image {cache_file_name} ' + f'(type {str(cur_file_type)}' + f' needs metadata: {("No", "Yes")[True is need_images[cur_file_type]]}' + f')') if need_images.get(cur_file_type): need_images[cur_file_type] = ( @@ -746,8 +746,8 @@ class ImageCache(object): if self.FANART == cur_file_type and \ (not sickgear.FANART_LIMIT or sickgear.FANART_LIMIT < need_images[cur_file_type]): continue - logger.log(u'Caching image found in the show directory to the image cache: %s, type %s' - % (cache_file_name, cur_file_type), logger.DEBUG) + logger.debug(f'Caching image found in the show directory to the image cache: {cache_file_name},' + f' type {cur_file_type}') self._cache_image_from_file( cache_file_name, cur_file_type, @@ -755,7 +755,7 @@ class ImageCache(object): isinstance(need_images[cur_file_type], bool)],)) except exceptions_helper.ShowDirNotFoundException: - logger.log(u'Unable to search for images in show directory because it doesn\'t exist', logger.WARNING) + logger.warning('Unable to search for images in show directory because it doesn\'t exist') # download images from TV info sources for image_type, name_type in [[self.POSTER, 'Poster'], [self.BANNER, 'Banner'], [self.FANART, 'Fanart']]: @@ -763,12 +763,12 @@ class ImageCache(object): if not max_files or max_files < need_images[image_type]: continue - logger.log(u'Seeing if we still need an image of type %s: %s' - % (name_type, ('No', 'Yes')[True is need_images[image_type]]), logger.DEBUG) + logger.debug(f'Seeing if we still need an image of type {name_type}:' + f' {("No", "Yes")[True is need_images[image_type]]}') if need_images[image_type]: file_num = (need_images[image_type] + 1, 1)[isinstance(need_images[image_type], bool)] if file_num <= max_files: self._cache_info_source_images(show_obj, image_type, file_num, max_files, force=force, show_infos=show_infos) - logger.log(u'Done cache check') + logger.log('Done cache check') diff --git a/sickgear/indexermapper.py b/sickgear/indexermapper.py index 38d0f022..fcb924c2 100644 --- a/sickgear/indexermapper.py +++ b/sickgear/indexermapper.py @@ -408,7 +408,7 @@ def load_mapped_ids(**kwargs): cur_show_obj.ids = sickgear.indexermapper.map_indexers_to_show(cur_show_obj, **n_kargs) except (BaseException, Exception): logger.debug('Error loading mapped id\'s for show: %s' % cur_show_obj.unique_name) - logger.log('Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error('Traceback: %s' % traceback.format_exc()) logger.log('TV info 
mappings loaded') diff --git a/sickgear/logger.py b/sickgear/logger.py index 2e479737..702fdb8f 100644 --- a/sickgear/logger.py +++ b/sickgear/logger.py @@ -51,7 +51,7 @@ MESSAGE = logging.INFO DEBUG = logging.DEBUG DB = 5 -reverseNames = {u'ERROR': ERROR, u'WARNING': WARNING, u'INFO': MESSAGE, u'DEBUG': DEBUG, u'DB': DB} +reverseNames = {'ERROR': ERROR, 'WARNING': WARNING, 'INFO': MESSAGE, 'DEBUG': DEBUG, 'DB': DB} # suppress output with this handler diff --git a/sickgear/metadata/generic.py b/sickgear/metadata/generic.py index 62b122dc..00c971ef 100644 --- a/sickgear/metadata/generic.py +++ b/sickgear/metadata/generic.py @@ -150,31 +150,31 @@ class GenericMetadata(object): def _has_show_metadata(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_show_file_path(show_obj)) - logger.log(u"Checking if " + self.get_show_file_path(show_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_show_file_path(show_obj)} exists: {result}') return result def has_episode_metadata(self, ep_obj): # type: (sickgear.tv.TVEpisode) -> bool result = os.path.isfile(self.get_episode_file_path(ep_obj)) - logger.log(u"Checking if " + self.get_episode_file_path(ep_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_episode_file_path(ep_obj)} exists: {result}') return result def _has_fanart(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_fanart_path(show_obj)) - logger.log(u"Checking if " + self.get_fanart_path(show_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_fanart_path(show_obj)} exists: {result}') return result def _has_poster(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_poster_path(show_obj)) - logger.log(u"Checking if " + self.get_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_poster_path(show_obj)} exists: {result}') return result def _has_banner(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_banner_path(show_obj)) - logger.log(u"Checking if " + self.get_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {self.get_banner_path(show_obj)} exists: {result}') return result def has_episode_thumb(self, ep_obj): @@ -182,7 +182,7 @@ class GenericMetadata(object): location = self.get_episode_thumb_path(ep_obj) result = None is not location and os.path.isfile(location) if location: - logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {location} exists: {result}') return result def _has_season_poster(self, show_obj, season): @@ -190,7 +190,7 @@ class GenericMetadata(object): location = self.get_season_poster_path(show_obj, season) result = None is not location and os.path.isfile(location) if location: - logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {location} exists: {result}') return result def _has_season_banner(self, show_obj, season): @@ -198,21 +198,19 @@ class GenericMetadata(object): location = self.get_season_banner_path(show_obj, season) result = None is not location and os.path.isfile(location) if location: - logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) + logger.debug(f'Checking if {location} exists: {result}') return result def _has_season_all_poster(self, show_obj): # type: 
(sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_season_all_poster_path(show_obj)) - logger.log(u"Checking if " + self.get_season_all_poster_path(show_obj) + " exists: " + str(result), - logger.DEBUG) + logger.debug(f'Checking if {self.get_season_all_poster_path(show_obj)} exists: {result}') return result def _has_season_all_banner(self, show_obj): # type: (sickgear.tv.TVShow) -> bool result = os.path.isfile(self.get_season_all_banner_path(show_obj)) - logger.log(u"Checking if " + self.get_season_all_banner_path(show_obj) + " exists: " + str(result), - logger.DEBUG) + logger.debug(f'Checking if {self.get_season_all_banner_path(show_obj)} exists: {result}') return result @staticmethod @@ -343,8 +341,7 @@ class GenericMetadata(object): isinstance(getattr(fetched_show_info, 'data', None), (list, dict)) and 'seriesname' in getattr(fetched_show_info, 'data', [])) and \ not hasattr(fetched_show_info, 'seriesname'): - logger.log(u'Show %s not found on %s ' % - (show_obj.name, sickgear.TVInfoAPI(show_obj.tvid).name), logger.WARNING) + logger.warning(f'Show {show_obj.name} not found on {sickgear.TVInfoAPI(show_obj.tvid).name} ') return False return True @@ -364,8 +361,8 @@ class GenericMetadata(object): try: result = self.write_show_file(show_obj) except BaseTVinfoError as e: - logger.log('Unable to find useful show metadata for %s on %s: %s' % ( - self.name, sickgear.TVInfoAPI(show_obj.tvid).name, ex(e)), logger.WARNING) + logger.warning(f'Unable to find useful show metadata for {self.name}' + f' on {sickgear.TVInfoAPI(show_obj.tvid).name}: {ex(e)}') return result @@ -373,21 +370,20 @@ class GenericMetadata(object): # type: (sickgear.tv.TVEpisode, bool) -> bool result = False if self.episode_metadata and ep_obj and (not self.has_episode_metadata(ep_obj) or force): - logger.log('Metadata provider %s creating episode metadata for %s' % (self.name, ep_obj.pretty_name()), - logger.DEBUG) + logger.debug('Metadata provider %s creating episode metadata for %s' % (self.name, ep_obj.pretty_name())) try: result = self.write_ep_file(ep_obj) except BaseTVinfoError as e: - logger.log('Unable to find useful episode metadata for %s on %s: %s' % ( - self.name, sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.WARNING) + logger.warning(f'Unable to find useful episode metadata for {self.name}' + f' on {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}: {ex(e)}') return result def update_show_indexer_metadata(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.show_metadata and show_obj and self._has_show_metadata(show_obj): - logger.debug(u'Metadata provider %s updating show indexer metadata file for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name}' + f' updating show indexer metadata file for {show_obj.unique_name}') nfo_file_path = self.get_show_file_path(show_obj) with io.open(nfo_file_path, 'r', encoding='utf8') as xmlFileObj: @@ -419,29 +415,28 @@ class GenericMetadata(object): def create_fanart(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.fanart and show_obj and not self._has_fanart(show_obj): - logger.debug(u'Metadata provider %s creating fanart for %s' % (self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating fanart for {show_obj.unique_name}') return self.save_fanart(show_obj) return False def create_poster(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.poster and show_obj and not self._has_poster(show_obj): - logger.debug(u'Metadata provider %s creating poster for 
%s' % (self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating poster for {show_obj.unique_name}') return self.save_poster(show_obj) return False def create_banner(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.banner and show_obj and not self._has_banner(show_obj): - logger.debug(u'Metadata provider %s creating banner for %s' % (self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating banner for {show_obj.unique_name}') return self.save_banner(show_obj) return False def create_episode_thumb(self, ep_obj): # type: (sickgear.tv.TVEpisode) -> bool if self.episode_thumbnails and ep_obj and not self.has_episode_thumb(ep_obj): - logger.log(u"Metadata provider " + self.name + " creating episode thumbnail for " + ep_obj.pretty_name(), - logger.DEBUG) + logger.debug(f'Metadata provider {self.name} creating episode thumbnail for {ep_obj.pretty_name()}') return self.save_thumbnail(ep_obj) return False @@ -451,8 +446,7 @@ class GenericMetadata(object): result = [] for season, _ in iteritems(show_obj.sxe_ep_obj): if not self._has_season_poster(show_obj, season): - logger.debug(u'Metadata provider %s creating season posters for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating season posters for {show_obj.unique_name}') result = result + [self.save_season_posters(show_obj, season)] return all(result) return False @@ -463,8 +457,7 @@ class GenericMetadata(object): result = [] for season, _ in iteritems(show_obj.sxe_ep_obj): if not self._has_season_banner(show_obj, season): - logger.debug(u'Metadata provider %s creating season banners for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating season banners for {show_obj.unique_name}') result = result + [self.save_season_banners(show_obj, season)] return all(result) return False @@ -472,16 +465,14 @@ class GenericMetadata(object): def create_season_all_poster(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.season_all_poster and show_obj and not self._has_season_all_poster(show_obj): - logger.debug(u'Metadata provider %s creating season all posters for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating season all posters for {show_obj.unique_name}') return self.save_season_all_poster(show_obj) return False def create_season_all_banner(self, show_obj): # type: (sickgear.tv.TVShow) -> bool if self.season_all_banner and show_obj and not self._has_season_all_banner(show_obj): - logger.debug(u'Metadata provider %s creating season all banner for %s' % ( - self.name, show_obj.unique_name)) + logger.debug(f'Metadata provider {self.name} creating season all banner for {show_obj.unique_name}') return self.save_season_all_banner(show_obj) return False @@ -557,7 +548,7 @@ class GenericMetadata(object): nfo_file_path = self.get_show_file_path(show_obj) - logger.log(u'Writing show metadata file: %s' % nfo_file_path, logger.DEBUG) + logger.debug(f'Writing show metadata file: {nfo_file_path}') return sg_helpers.write_file(nfo_file_path, data, xmltree=True, utf8=True) @@ -586,7 +577,7 @@ class GenericMetadata(object): nfo_file_path = self.get_episode_file_path(ep_obj) - logger.log(u'Writing episode metadata file: %s' % nfo_file_path, logger.DEBUG) + logger.debug(f'Writing episode metadata file: {nfo_file_path}') return sg_helpers.write_file(nfo_file_path, data, xmltree=True, utf8=True) @@ -603,14 +594,14 @@ class 
GenericMetadata(object): file_path = self.get_episode_thumb_path(ep_obj) if not file_path: - logger.log(u"Unable to find a file path to use for this thumbnail, not generating it", logger.DEBUG) + logger.debug('Unable to find a file path to use for this thumbnail, not generating it') return False thumb_url = self._get_episode_thumb_url(ep_obj) # if we can't find one then give up if not thumb_url: - logger.log(u"No thumb is available for this episode, not creating a thumb", logger.DEBUG) + logger.debug('No thumb is available for this episode, not creating a thumb') return False thumb_data = metadata_helpers.get_show_image(thumb_url, show_name=ep_obj.show_obj.name) @@ -641,7 +632,7 @@ class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.FANART) if not fanart_data: - logger.log(u"No fanart image was retrieved, unable to write fanart", logger.DEBUG) + logger.debug('No fanart image was retrieved, unable to write fanart') return False return self._write_image(fanart_data, fanart_path) @@ -662,7 +653,7 @@ class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.POSTER) if not poster_data: - logger.log(u"No show poster image was retrieved, unable to write poster", logger.DEBUG) + logger.debug('No show poster image was retrieved, unable to write poster') return False return self._write_image(poster_data, poster_path) @@ -683,7 +674,7 @@ class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.BANNER) if not banner_data: - logger.log(u"No show banner image was retrieved, unable to write banner", logger.DEBUG) + logger.debug('No show banner image was retrieved, unable to write banner') return False return self._write_image(banner_data, banner_path) @@ -717,14 +708,13 @@ class GenericMetadata(object): season_poster_file_path = self.get_season_poster_path(show_obj, cur_season) if not season_poster_file_path: - logger.log(u'Path for season ' + str(cur_season) + ' came back blank, skipping this season', - logger.DEBUG) + logger.debug(f'Path for season {cur_season} came back blank, skipping this season') continue season_data = metadata_helpers.get_show_image(season_url, show_name=show_obj.name) if not season_data: - logger.log(u'No season poster data available, skipping this season', logger.DEBUG) + logger.debug('No season poster data available, skipping this season') continue result = result + [self._write_image(season_data, season_poster_file_path)] @@ -762,14 +752,13 @@ class GenericMetadata(object): season_banner_file_path = self.get_season_banner_path(show_obj, cur_season) if not season_banner_file_path: - logger.log(u'Path for season ' + str(cur_season) + ' came back blank, skipping this season', - logger.DEBUG) + logger.debug(f'Path for season {cur_season} came back blank, skipping this season') continue season_data = metadata_helpers.get_show_image(season_url, show_name=show_obj.name) if not season_data: - logger.log(u'No season banner data available, skipping this season', logger.DEBUG) + logger.debug('No season banner data available, skipping this season') continue result = result + [self._write_image(season_data, season_banner_file_path)] @@ -787,7 +776,7 @@ class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.POSTER) if not poster_data: - logger.log(u"No show poster image was retrieved, unable to write season all poster", logger.DEBUG) + logger.debug('No show poster image was retrieved, unable to write season all poster') return False return self._write_image(poster_data, poster_path) @@ -801,7 +790,7 @@ 
class GenericMetadata(object): img_cache_type=sickgear.image_cache.ImageCache.BANNER) if not banner_data: - logger.log(u"No show banner image was retrieved, unable to write season all banner", logger.DEBUG) + logger.debug('No show banner image was retrieved, unable to write season all banner') return False return self._write_image(banner_data, banner_path) @@ -819,18 +808,18 @@ class GenericMetadata(object): # don't bother overwriting it if not force and os.path.isfile(image_path): - logger.log(u"Image already exists, not downloading", logger.DEBUG) + logger.debug('Image already exists, not downloading') return False if not image_data: - logger.log(u"Unable to retrieve image, skipping", logger.WARNING) + logger.warning('Unable to retrieve image, skipping') return False image_dir = os.path.dirname(image_path) try: if not os.path.isdir(image_dir): - logger.log(u"Metadata dir didn't exist, creating it at " + image_dir, logger.DEBUG) + logger.debug(f'Metadata dir didn\'t exist, creating it at {image_dir}') os.makedirs(image_dir) sg_helpers.chmod_as_parent(image_dir) @@ -839,9 +828,7 @@ class GenericMetadata(object): out_file.close() sg_helpers.chmod_as_parent(image_path) except IOError as e: - logger.log( - u"Unable to write image to " + image_path + " - are you sure the show folder is writable? " + ex(e), - logger.ERROR) + logger.error(f'Unable to write image to {image_path} - are you sure the show folder is writable? {ex(e)}') return False return True @@ -869,8 +856,8 @@ class GenericMetadata(object): return t.get_show((show_obj.ids[tv_id]['id'], show_obj.prodid)[tv_src == show_obj.tvid], load_episodes=False, banners=True, posters=True, fanart=True, language=show_obj.lang) except (BaseTVinfoError, IOError) as e: - logger.log(u"Unable to look up show on " + sickgear.TVInfoAPI( tv_id).name + ", not downloading images: " + ex(e), logger.WARNING) + logger.warning(f'Unable to look up show on {sickgear.TVInfoAPI(tv_id).name},' f' not downloading images: {ex(e)}') # todo: when tmdb is added as tv source remove the hardcoded TVINFO_TMDB for tv_src in list(OrderedDict.fromkeys([show_obj.tvid] + list(sickgear.TVInfoAPI().search_sources) + @@ -1042,8 +1029,8 @@ class GenericMetadata(object): image_type = 'fanart' if image_type not in ('poster', 'banner', 'fanart', 'poster_thumb', 'banner_thumb'): - logger.log(u"Invalid image type " + str(image_type) + ", couldn't find it in the " + sickgear.TVInfoAPI( show_obj.tvid).name + " object", logger.ERROR) + logger.error(f'Invalid image type {image_type}, couldn\'t find it in the' f' {sickgear.TVInfoAPI(show_obj.tvid).name} object') return image_urls = self._retrieve_image_urls(show_obj, image_type, show_infos) @@ -1094,8 +1081,8 @@ class GenericMetadata(object): t = sickgear.TVInfoAPI(show_obj.tvid).setup(**tvinfo_config) tvinfo_obj_show = t[show_obj.prodid] except (BaseTVinfoError, IOError) as e: - logger.log(u'Unable to look up show on ' + sickgear.TVInfoAPI( show_obj.tvid).name + ', not downloading images: ' + ex(e), logger.WARNING) + logger.warning(f'Unable to look up show on {sickgear.TVInfoAPI(show_obj.tvid).name},' f' not downloading images: {ex(e)}') return result if not self._valid_show(tvinfo_obj_show, show_obj): @@ -1124,10 +1111,10 @@ class GenericMetadata(object): metadata_path = os.path.join(folder, self._show_metadata_filename) if not os.path.isdir(folder) or not os.path.isfile(metadata_path): - logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG) + logger.debug(f'Can\'t load the
metadata file from {repr(metadata_path)}, it doesn\'t exist') return empty_return - logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG) + logger.debug(f'Loading show info from metadata file in {folder}') try: with io.open(metadata_path, 'r', encoding='utf8') as xmlFileObj: @@ -1138,11 +1125,9 @@ show_xml.findtext('tvdbid'), show_xml.findtext('id'), show_xml.findtext('indexer'))): - logger.log(u"Invalid info in tvshow.nfo (missing name or id):" - + str(show_xml.findtext('title')) + ' ' - + str(show_xml.findtext('indexer')) + ' ' - + str(show_xml.findtext('tvdbid')) + ' ' - + str(show_xml.findtext('id'))) + logger.log(f'Invalid info in tvshow.nfo (missing name or id):' f'{show_xml.findtext("title")} {show_xml.findtext("indexer")} ' f'{show_xml.findtext("tvdbid")} {show_xml.findtext("id")}') return empty_return name = show_xml.findtext('title') @@ -1178,17 +1163,15 @@ except (BaseException, Exception): pass else: - logger.log(u"Empty or field in NFO, unable to find a ID", logger.WARNING) + logger.warning('Empty or field in NFO, unable to find an ID') return empty_return if None is prodid: - logger.log(u"Invalid Show ID (%s), not using metadata file" % prodid, logger.WARNING) + logger.warning(f'Invalid Show ID ({prodid}), not using metadata file') return empty_return except (BaseException, Exception) as e: - logger.log( u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e), logger.WARNING) + logger.warning(f'There was an error parsing your existing metadata file: "{metadata_path}" error: {ex(e)}') return empty_return return tvid, prodid, name @@ -1202,7 +1185,7 @@ except (BaseException, Exception): pass - logger.log(u'Could not find any %s images on Fanart.tv for %s' % (image_type, show_obj.name), logger.DEBUG) + logger.debug(f'Could not find any {image_type} images on Fanart.tv for {show_obj.name}') @staticmethod def _fanart_urls(tvdb_id, image_type='banner', lang='en', thumb=False): diff --git a/sickgear/metadata/helpers.py b/sickgear/metadata/helpers.py index f0f5254a..0aac20c8 100644 --- a/sickgear/metadata/helpers.py +++ b/sickgear/metadata/helpers.py @@ -42,7 +42,7 @@ def get_show_image(url, img_num=None, show_name=None, supress_log=False): # if they provided a fanart number try to use it instead temp_url = url if None is img_num else url.split('-')[0] + '-' + str(img_num) + '.jpg' - logger.log(u'Fetching image from ' + temp_url, logger.DEBUG) + logger.debug(f'Fetching image from {temp_url}') from sickgear import FLARESOLVERR_HOST, MEMCACHE MEMCACHE.setdefault('cookies', {}) @@ -51,8 +51,8 @@ def get_show_image(url, img_num=None, show_name=None, supress_log=False): if None is image_data: if supress_log: return - logger.log('There was an error trying to retrieve the image%s, aborting' % ('', ' for show: %s' % show_name)[None is not show_name], logger.WARNING) + logger.warning(f'There was an error trying to retrieve the image' f'{("", " for show: %s" % show_name)[None is not show_name]}, aborting') return return image_data diff --git a/sickgear/metadata/kodi.py b/sickgear/metadata/kodi.py index 9723d940..5b836ad4 100644 --- a/sickgear/metadata/kodi.py +++ b/sickgear/metadata/kodi.py @@ -127,13 +127,11 @@ class KODIMetadata(generic.GenericMetadata): try: show_info = t[int(show_id)] except BaseTVinfoShownotfound as e: - logger.log('Unable to find show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI(
show_obj.tvid).name), logger.ERROR) + logger.error(f'Unable to find show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' f' skipping it') raise e except BaseTVinfoError as e: - logger.log( '%s is down, can\'t use its data to add this show' % sickgear.TVInfoAPI(show_obj.tvid).name, logger.ERROR) + logger.error(f'{sickgear.TVInfoAPI(show_obj.tvid).name} is down, can\'t use its data to add this show') raise e if not self._valid_show(show_info, show_obj): @@ -141,8 +139,8 @@ class KODIMetadata(generic.GenericMetadata): # check for title and id if None is getattr(show_info, 'seriesname', None) or None is getattr(show_info, 'id', None): - logger.log('Incomplete info for show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI( show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' f' skipping it') return False title = etree.SubElement(tv_node, 'title') @@ -171,8 +169,8 @@ class KODIMetadata(generic.GenericMetadata): uniqueid = etree.SubElement(tv_node, 'uniqueid', **kwargs) uniqueid.text = '%s%s' % (('', 'tt')[TVINFO_IMDB == tvid], mid) if not has_id: - logger.log('Incomplete info for show with id %s on %s, skipping it' % (show_id, sickgear.TVInfoAPI( show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' f' skipping it') return False ratings = etree.SubElement(tv_node, 'ratings') @@ -235,7 +233,7 @@ class KODIMetadata(generic.GenericMetadata): nfo_file_path = self.get_show_file_path(show_obj) - logger.log(u'Writing Kodi metadata file: %s' % nfo_file_path, logger.DEBUG) + logger.debug(f'Writing Kodi metadata file: {nfo_file_path}') data = '<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>\n%s' % data return sg_helpers.write_file(nfo_file_path, data, utf8=True) @@ -261,7 +259,7 @@ class KODIMetadata(generic.GenericMetadata): nfo_file_path = self.get_episode_file_path(ep_obj) - logger.log(u'Writing episode metadata file: %s' % nfo_file_path, logger.DEBUG) + logger.debug(f'Writing episode metadata file: {nfo_file_path}') return sg_helpers.write_file(nfo_file_path, data, xmltree=True, xml_header=True, utf8=True) @@ -292,8 +290,8 @@ class KODIMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log('Unable to connect to %s while creating meta files - skipping - %s' % (sickgear.TVInfoAPI( ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' f' while creating meta files - skipping - {ex(e)}') return if not self._valid_show(show_info, ep_obj.show_obj): @@ -318,10 +316,10 @@ class KODIMetadata(generic.GenericMetadata): ep_info['firstaired'] = str(datetime.date.fromordinal(1)) if None is getattr(ep_info, 'episodename', None): - logger.log(u'Not generating nfo because the episode has no title', logger.DEBUG) + logger.debug('Not generating nfo because the episode has no title') return None - logger.log('Creating metadata for episode %sx%s' % (ep_obj.season, ep_obj.episode), logger.DEBUG) + logger.debug('Creating metadata for episode %sx%s' % (ep_obj.season, ep_obj.episode)) if 1 < len(ep_obj_list_to_write): ep_node = etree.SubElement(root_node, 'episodedetails') diff --git a/sickgear/metadata/mede8er.py b/sickgear/metadata/mede8er.py index 3b9759d1..b9f996f5 100644 --- a/sickgear/metadata/mede8er.py +++ b/sickgear/metadata/mede8er.py @@ -127,10 +127,10
@@ class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): try: show_info = t[int(show_obj.prodid)] except BaseTVinfoShownotfound as e: - logger.log(u'Unable to find show with id ' + str(show_obj.prodid) + ' on tvdb, skipping it', logger.ERROR) + logger.error(f'Unable to find show with id {show_obj.prodid} on tvdb, skipping it') raise e except BaseTVinfoError as e: - logger.log(u'TVDB is down, can\'t use its data to make the NFO', logger.ERROR) + logger.error('TVDB is down, can\'t use its data to make the NFO') raise e if not self._valid_show(show_info, show_obj): @@ -142,12 +142,12 @@ class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): or '' == show_info['seriesname'] \ or None is show_info['id'] \ or '' == show_info['id']: - logger.log('Incomplete info for show with id %s on %s, skipping it' % - (show_obj.prodid, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_obj.prodid}' f' on {sickgear.TVInfoAPI(show_obj.tvid).name}, skipping it') return False except BaseTVinfoAttributenotfound: - logger.log('Incomplete info for show with id %s on %s, skipping it' % - (show_obj.prodid, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_obj.prodid}' f' on {sickgear.TVInfoAPI(show_obj.tvid).name}, skipping it') return False SeriesName = etree.SubElement(tv_node, 'title') @@ -241,8 +241,8 @@ class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log('Unable to connect to %s while creating meta files - skipping - %s' % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' f' while creating meta files - skipping - {ex(e)}') return False if not self._valid_show(show_info, ep_obj.show_obj): @@ -261,8 +261,8 @@ class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): try: ep_info = show_info[cur_ep_obj.season][cur_ep_obj.episode] except (BaseException, Exception): - logger.log(u'Unable to find episode %sx%s on tvdb... has it been removed? Should I delete from db?' % - (cur_ep_obj.season, cur_ep_obj.episode)) + logger.log(f'Unable to find episode {cur_ep_obj.season}x{cur_ep_obj.episode} on tvdb...' f' has it been removed?
Should it be deleted from the db?') return None if cur_ep_obj == ep_obj: diff --git a/sickgear/metadata/mediabrowser.py b/sickgear/metadata/mediabrowser.py index d3a2947a..f8c1e8d3 100644 --- a/sickgear/metadata/mediabrowser.py +++ b/sickgear/metadata/mediabrowser.py @@ -123,7 +123,7 @@ class MediaBrowserMetadata(generic.GenericMetadata): metadata_dir_name = os.path.join(os.path.dirname(ep_obj.location), 'metadata') xml_file_path = os.path.join(metadata_dir_name, xml_file_name) else: - logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG) + logger.debug(f'Episode location doesn\'t exist: {ep_obj.location}') return '' return xml_file_path @@ -175,10 +175,10 @@ class MediaBrowserMetadata(generic.GenericMetadata): break if not season_dir: - logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Unable to find a season dir for season {season}') return None - logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Using {season_dir}/folder.jpg as season dir for season {season}') return os.path.join(show_obj.location, season_dir, 'folder.jpg') @@ -215,10 +215,10 @@ class MediaBrowserMetadata(generic.GenericMetadata): break if not season_dir: - logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Unable to find a season dir for season {season}') return None - logger.log(u"Using " + str(season_dir) + "/banner.jpg as season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Using {season_dir}/banner.jpg as season dir for season {season}') return os.path.join(show_obj.location, season_dir, 'banner.jpg') @@ -252,12 +252,11 @@ class MediaBrowserMetadata(generic.GenericMetadata): try: show_info = t[int(show_obj.prodid)] except BaseTVinfoShownotfound as e: - logger.log("Unable to find show with id %s on %s, skipping it" % - (show_obj.prodid, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Unable to find show with id {show_obj.prodid} ' + f'on {sickgear.TVInfoAPI(show_obj.tvid).name}, skipping it') raise e except BaseTVinfoError as e: - logger.log("%s is down, can't use its data to make the NFO" % sickgear.TVInfoAPI(show_obj.tvid).name, - logger.ERROR) + logger.error('%s is down, can\'t use its data to make the NFO' % sickgear.TVInfoAPI(show_obj.tvid).name) raise e if not self._valid_show(show_info, show_obj): @@ -265,8 +264,8 @@ class MediaBrowserMetadata(generic.GenericMetadata): # check for title and id if None is getattr(show_info, 'seriesname', None) or None is getattr(show_info, 'id', None): - logger.log("Incomplete info for show with id %s on %s, skipping it" % - (show_obj.prodid, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_obj.prodid}' + f' on {sickgear.TVInfoAPI(show_obj.tvid).name}, skipping it') return False prodid = etree.SubElement(tv_node, "id") @@ -415,8 +414,8 @@ class MediaBrowserMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log("Unable to connect to %s while creating meta files - skipping - %s" % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' + f' while creating meta files - skipping - {ex(e)}') return False if not self._valid_show(show_info, ep_obj.show_obj): diff --git 
a/sickgear/metadata/tivo.py b/sickgear/metadata/tivo.py index eced781d..b3a040d3 100644 --- a/sickgear/metadata/tivo.py +++ b/sickgear/metadata/tivo.py @@ -158,7 +158,7 @@ class TIVOMetadata(generic.GenericMetadata): metadata_dir_name = os.path.join(os.path.dirname(ep_obj.location), '.meta') metadata_file_path = os.path.join(metadata_dir_name, metadata_file_name) else: - logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG) + logger.debug(f'Episode location doesn\'t exist: {ep_obj.location}') return '' return metadata_file_path @@ -203,8 +203,8 @@ class TIVOMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log("Unable to connect to %s while creating meta files - skipping - %s" % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' + f' while creating meta files - skipping - {ex(e)}') return False if not self._valid_show(show_info, ep_obj.show_obj): @@ -251,10 +251,10 @@ class TIVOMetadata(generic.GenericMetadata): # Write the synopsis of the video here sanitizedDescription = cur_ep_obj.description # Replace double curly quotes - sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"") + sanitizedDescription = sanitizedDescription.replace('\u201c', '"').replace('\u201d', '"') # Replace single curly quotes - sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace( - u"\u02BC", "'") + sanitizedDescription = sanitizedDescription.replace('\u2018', '\'').replace('\u2019', '\'').replace( + '\u02BC', '\'') data += ("description : " + sanitizedDescription + "\n") @@ -337,11 +337,11 @@ class TIVOMetadata(generic.GenericMetadata): try: if not os.path.isdir(nfo_file_dir): - logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG) + logger.debug(f'Metadata dir didn\'t exist, creating it at {nfo_file_dir}') os.makedirs(nfo_file_dir) sg_helpers.chmod_as_parent(nfo_file_dir) - logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG) + logger.debug(f'Writing episode nfo file to {nfo_file_path}') with open(nfo_file_path, 'w') as nfo_file: # Calling encode directly, b/c often descriptions have wonky characters. @@ -350,8 +350,7 @@ class TIVOMetadata(generic.GenericMetadata): sg_helpers.chmod_as_parent(nfo_file_path) except EnvironmentError as e: - logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), - logger.ERROR) + logger.error(f'Unable to write file to {nfo_file_path} - are you sure the folder is writable? 
{ex(e)}') return False return True diff --git a/sickgear/metadata/wdtv.py b/sickgear/metadata/wdtv.py index 0864e43d..9dda6a5d 100644 --- a/sickgear/metadata/wdtv.py +++ b/sickgear/metadata/wdtv.py @@ -168,10 +168,10 @@ class WDTVMetadata(generic.GenericMetadata): break if not season_dir: - logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Unable to find a season dir for season {season}') return None - logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG) + logger.debug(f'Using {season_dir}/folder.jpg as season dir for season {season}') return os.path.join(show_obj.location, season_dir, 'folder.jpg') @@ -204,8 +204,8 @@ class WDTVMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log("Unable to connect to %s while creating meta files - skipping - %s" % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name}' + f' while creating meta files - skipping - {ex(e)}') return False if not self._valid_show(show_info, ep_obj.show_obj): diff --git a/sickgear/metadata/xbmc_12plus.py b/sickgear/metadata/xbmc_12plus.py index f0b709a4..57b0f2de 100644 --- a/sickgear/metadata/xbmc_12plus.py +++ b/sickgear/metadata/xbmc_12plus.py @@ -123,12 +123,11 @@ class XBMC12PlusMetadata(generic.GenericMetadata): try: show_info = t[int(show_id)] except BaseTVinfoShownotfound as e: - logger.log('Unable to find show with id %s on %s, skipping it' % - (show_id, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Unable to find show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' + f' skipping it') raise e except BaseTVinfoError as e: - logger.log('%s is down, can\'t use its data to add this show' % sickgear.TVInfoAPI(show_obj.tvid).name, - logger.ERROR) + logger.error('%s is down, can\'t use its data to add this show' % sickgear.TVInfoAPI(show_obj.tvid).name) raise e if not self._valid_show(show_info, show_obj): @@ -136,8 +135,8 @@ class XBMC12PlusMetadata(generic.GenericMetadata): # check for title and id if None is getattr(show_info, 'seriesname', None) or None is getattr(show_info, 'id', None): - logger.log('Incomplete info for show with id %s on %s, skipping it' % - (show_id, sickgear.TVInfoAPI(show_obj.tvid).name), logger.ERROR) + logger.error(f'Incomplete info for show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},' + f' skipping it') return False title = etree.SubElement(tv_node, 'title') @@ -227,8 +226,9 @@ class XBMC12PlusMetadata(generic.GenericMetadata): except BaseTVinfoShownotfound as e: raise exceptions_helper.ShowNotFoundException(ex(e)) except BaseTVinfoError as e: - logger.log('Unable to connect to %s while creating meta files - skipping - %s' % - (sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error( + f'Unable to connect to {sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name} while creating meta files' + f' - skipping - {ex(e)}') return if not self._valid_show(show_info, ep_obj.show_obj): @@ -249,17 +249,17 @@ class XBMC12PlusMetadata(generic.GenericMetadata): (cur_ep_obj.season, cur_ep_obj.episode, sickgear.TVInfoAPI(ep_obj.show_obj.tvid).name)) return None except (BaseException, Exception): - logger.log(u'Not generating nfo because failed to fetched tv info data at this time', logger.DEBUG) + logger.debug('Not 
generating nfo because failed to fetch tv info data at this time') return None if None is getattr(ep_info, 'firstaired', None): ep_info['firstaired'] = str(datetime.date.fromordinal(1)) if None is getattr(ep_info, 'episodename', None): - logger.log(u'Not generating nfo because the ep has no title', logger.DEBUG) + logger.debug('Not generating nfo because the ep has no title') return None - logger.log(u'Creating metadata for episode ' + str(ep_obj.season) + 'x' + str(ep_obj.episode), logger.DEBUG) + logger.debug(f'Creating metadata for episode {ep_obj.season}x{ep_obj.episode}') if 1 < len(ep_obj_list_to_write): episode = etree.SubElement(rootNode, 'episodedetails') diff --git a/sickgear/name_parser/parser.py b/sickgear/name_parser/parser.py index c1769f1b..46cf2fae 100644 --- a/sickgear/name_parser/parser.py +++ b/sickgear/name_parser/parser.py @@ -98,7 +98,7 @@ class NameParser(object): cur_pattern = strip_comment.sub('', cur_pattern) cur_regex = re.compile('(?x)' + cur_pattern, re.VERBOSE | re.IGNORECASE) except re.error as errormsg: - logger.log(u'WARNING: Invalid episode_pattern, %s. %s' % (errormsg, cur_pattern)) + logger.warning(f'Invalid episode_pattern, {errormsg}. {cur_pattern}') else: cls.compiled_regexes[index].append([cur_pattern_num, cur_pattern_name, cur_regex]) index += 1 @@ -380,12 +380,11 @@ class NameParser(object): season_number = int(ep_obj['seasonnumber']) episode_numbers = [int(ep_obj['episodenumber'])] except BaseTVinfoEpisodenotfound: - logger.warning(u'Unable to find episode with date %s for show %s, skipping' % (best_result.air_date, show_obj.unique_name)) + logger.warning(f'Unable to find episode with date {best_result.air_date}' f' for show {show_obj.unique_name}, skipping') episode_numbers = [] except BaseTVinfoError as e: - logger.log(u'Unable to contact ' + sickgear.TVInfoAPI(show_obj.tvid).name - + ': ' + ex(e), logger.WARNING) + logger.warning(f'Unable to contact {sickgear.TVInfoAPI(show_obj.tvid).name}: {ex(e)}') episode_numbers = [] for epNo in episode_numbers: @@ -468,9 +467,8 @@ class NameParser(object): best_result.season_number = new_season_numbers[0] if self.convert and show_obj.is_scene: - logger.log(u'Converted parsed result %s into %s' - % (best_result.original_name, decode_str(str(best_result), errors='xmlcharrefreplace')), - logger.DEBUG) + logger.debug(f'Converted parsed result {best_result.original_name}' f' into {decode_str(str(best_result), errors="xmlcharrefreplace")}') helpers.cpu_sleep() @@ -646,7 +644,7 @@ class NameParser(object): and any('anime' in wr for wr in final_result.which_regex) == bool(final_result.show_obj.is_anime): name_parser_cache.add(name, final_result) - logger.log(u'Parsed %s into %s' % (name, final_result), logger.DEBUG) + logger.debug(f'Parsed {name} into {final_result}') return final_result @@ -752,9 +750,9 @@ class ParseResult(LegacyParseResult): def __unicode__(self): if None is not self.series_name: - to_return = self.series_name + u' - ' + to_return = f'{self.series_name} - ' else: - to_return = u'' + to_return = '' if None is not self.season_number: to_return += 'S' + str(self.season_number) if self.episode_numbers and len(self.episode_numbers): @@ -863,7 +861,7 @@ class NameParserCache(object): key = self._previous_parsed.first_key() del self._previous_parsed[key] except KeyError: - logger.log('Could not remove old NameParserCache entry: %s' % key, logger.DEBUG) + logger.debug('Could not remove old NameParserCache entry: %s' % key) def get(self, name): # type: (AnyStr) -> ParseResult @@ -876,7 +874,7 @@
class NameParserCache(object):
         """
         with self.lock:
             if name in self._previous_parsed:
-                logger.log('Using cached parse result for: ' + name, logger.DEBUG)
+                logger.debug('Using cached parse result for: ' + name)
                 self._previous_parsed.move_to_end(name)
                 return self._previous_parsed[name]
diff --git a/sickgear/naming.py b/sickgear/naming.py
index 6d34d227..3d2378b2 100644
--- a/sickgear/naming.py
+++ b/sickgear/naming.py
@@ -165,11 +165,11 @@ def check_valid_naming(pattern=None, multi=None, anime_type=None):
     if None is anime_type:
         anime_type = sickgear.NAMING_ANIME
 
-    logger.log(u'Checking whether the pattern %s is valid for a single episode' % pattern, logger.DEBUG)
+    logger.debug(f'Checking whether the pattern {pattern} is valid for a single episode')
     valid = validate_name(pattern, None, anime_type)
 
     if None is not multi:
-        logger.log(u'Checking whether the pattern %s is valid for a multi episode' % pattern, logger.DEBUG)
+        logger.debug(f'Checking whether the pattern {pattern} is valid for a multi episode')
         valid = valid and validate_name(pattern, multi, anime_type)
 
     return valid
@@ -188,7 +188,7 @@ def check_valid_abd_naming(pattern=None):
     if None is pattern:
         pattern = sickgear.NAMING_PATTERN
 
-    logger.log(u'Checking whether the pattern %s is valid for an air-by-date episode' % pattern, logger.DEBUG)
+    logger.debug(f'Checking whether the pattern {pattern} is valid for an air-by-date episode')
     valid = validate_name(pattern, abd=True)
 
     return valid
@@ -207,7 +207,7 @@ def check_valid_sports_naming(pattern=None):
     if None is pattern:
         pattern = sickgear.NAMING_PATTERN
 
-    logger.log(u'Checking whether the pattern %s is valid for an sports episode' % pattern, logger.DEBUG)
+    logger.debug(f'Checking whether the pattern {pattern} is valid for a sports episode')
     valid = validate_name(pattern, sports=True)
 
     return valid
@@ -233,43 +233,43 @@ def validate_name(pattern, multi=None, anime_type=None, file_only=False, abd=Fal
     """
     sample_ep_obj = generate_sample_ep(multi, abd, sports, anime_type=anime_type)
 
-    new_name = u'%s.ext' % sample_ep_obj.formatted_filename(pattern, multi, anime_type)
+    new_name = f'{sample_ep_obj.formatted_filename(pattern, multi, anime_type)}.ext'
     new_path = sample_ep_obj.formatted_dir(pattern, multi)
     if not file_only:
         new_name = os.path.join(new_path, new_name)
 
     if not new_name:
-        logger.log(u'Unable to create a name out of %s' % pattern, logger.DEBUG)
+        logger.debug(f'Unable to create a name out of {pattern}')
         return False
 
-    logger.log(u'Trying to parse %s' % new_name, logger.DEBUG)
+    logger.debug(f'Trying to parse {new_name}')
 
     parser = NameParser(True, show_obj=sample_ep_obj.show_obj, naming_pattern=True)
 
     try:
         result = parser.parse(new_name)
     except (BaseException, Exception):
-        logger.log(u'Unable to parse %s, not valid' % new_name, logger.DEBUG)
+        logger.debug(f'Unable to parse {new_name}, not valid')
         return False
 
-    logger.log(u'The name %s parsed into %s' % (new_name, result), logger.DEBUG)
+    logger.debug(f'The name {new_name} parsed into {result}')
 
     if abd or sports:
         if result.air_date != sample_ep_obj.airdate:
-            logger.log(u'Air date incorrect in parsed episode, pattern isn\'t valid', logger.DEBUG)
+            logger.debug('Air date incorrect in parsed episode, pattern isn\'t valid')
             return False
     elif 3 == anime_type:
         if result.season_number != sample_ep_obj.season:
-            logger.log(u'Season number incorrect in parsed episode, pattern isn\'t valid', logger.DEBUG)
+            logger.debug('Season number incorrect in parsed episode, pattern isn\'t valid')
             return False
         if result.episode_numbers != [x.episode
for x in [sample_ep_obj] + sample_ep_obj.related_ep_obj]: - logger.log(u'Episode numbering incorrect in parsed episode, pattern isn\'t valid', logger.DEBUG) + logger.debug('Episode numbering incorrect in parsed episode, pattern isn\'t valid') return False else: if len(result.ab_episode_numbers) \ and result.ab_episode_numbers != [x.absolute_number for x in [sample_ep_obj] + sample_ep_obj.related_ep_obj]: - logger.log(u'Absolute numbering incorrect in parsed episode, pattern isn\'t valid', logger.DEBUG) + logger.debug('Absolute numbering incorrect in parsed episode, pattern isn\'t valid') return False return True diff --git a/sickgear/network_timezones.py b/sickgear/network_timezones.py index c6c548ce..abedde55 100644 --- a/sickgear/network_timezones.py +++ b/sickgear/network_timezones.py @@ -156,9 +156,9 @@ def _remove_old_zoneinfo(): for _dir in (sickgear.ZONEINFO_DIR, )]): # type: DirEntry if current_file != entry.path: if remove_file_perm(entry.path, log_err=False): - logger.log(u'Delete unneeded old zoneinfo File: %s' % entry.path) + logger.log(f'Delete unneeded old zoneinfo File: {entry.path}') else: - logger.log(u'Unable to delete: %s' % entry.path, logger.ERROR) + logger.error(f'Unable to delete: {entry.path}') def _update_zoneinfo(): @@ -175,16 +175,15 @@ def _update_zoneinfo(): if None is url_data: update_last_retry() # when None is urlData, trouble connecting to GitHub - logger.log(u'Fetching zoneinfo.txt failed, this can happen from time to time. Unable to get URL: %s' % url, - logger.WARNING) + logger.warning(f'Fetching zoneinfo.txt failed, this can happen from time to time. Unable to get URL: {url}') return reset_last_retry() try: - (new_zoneinfo, zoneinfo_md5) = url_data.strip().rsplit(u' ') + (new_zoneinfo, zoneinfo_md5) = url_data.strip().rsplit(' ') except (BaseException, Exception): - logger.log('Fetching zoneinfo.txt failed, update contains unparsable data: %s' % url_data, logger.DEBUG) + logger.debug('Fetching zoneinfo.txt failed, update contains unparsable data: %s' % url_data) return current_file = zoneinfo.ZONEFILENAME @@ -206,25 +205,25 @@ def _update_zoneinfo(): return # load the new zoneinfo - url_tar = u'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/%s' % new_zoneinfo + url_tar = f'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/{new_zoneinfo}' zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile) if not remove_file_perm(zonefile_tmp, log_err=False): - logger.log(u'Unable to delete: %s' % zonefile_tmp, logger.ERROR) + logger.error(f'Unable to delete: {zonefile_tmp}') return if not helpers.download_file(url_tar, zonefile_tmp): return if not os.path.exists(zonefile_tmp): - logger.log(u'Download of %s failed.' 
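The zoneinfo update above parses a 'filename md5' pair from zoneinfo.txt and, just below, only installs the downloaded tarball when its hash matches the published one. A self-contained sketch of that verification step, approximating helpers.md5_for_file with hashlib (the path and digest arguments are placeholders):

    import hashlib

    def md5_for_file(path, chunk_size=64 * 1024):
        # hash in chunks so a large tarball is never fully loaded into memory
        digest = hashlib.md5()
        with open(path, 'rb') as fh:
            for chunk in iter(lambda: fh.read(chunk_size), b''):
                digest.update(chunk)
        return digest.hexdigest()

    def is_valid_download(path, published_md5):
        # case-insensitive compare, as the code below does with .upper()
        return md5_for_file(path).upper() == published_md5.upper()
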
% zonefile_tmp, logger.ERROR) + logger.error(f'Download of {zonefile_tmp} failed.') return new_hash = str(helpers.md5_for_file(zonefile_tmp)) if zoneinfo_md5.upper() == new_hash.upper(): - logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo, logger.MESSAGE) + logger.log(f'Updating timezone info with new one: {new_zoneinfo}', logger.MESSAGE) try: # remove the old zoneinfo file if None is not current_file: @@ -245,7 +244,7 @@ def _update_zoneinfo(): return else: remove_file_perm(zonefile_tmp, log_err=False) - logger.log(u'MD5 hash does not match: %s File: %s' % (zoneinfo_md5.upper(), new_hash.upper()), logger.ERROR) + logger.error(f'MD5 hash does not match: {zoneinfo_md5.upper()} File: {new_hash.upper()}') return @@ -270,7 +269,7 @@ def update_network_dict(): if url_data in (None, ''): update_last_retry() # When None is urlData, trouble connecting to GitHub - logger.debug(u'Updating network timezones failed, this can happen from time to time. URL: %s' % url) + logger.debug(f'Updating network timezones failed, this can happen from time to time. URL: {url}') load_network_dict(load=False) return @@ -279,7 +278,7 @@ def update_network_dict(): try: for line in url_data.splitlines(): try: - (name, tzone) = line.strip().rsplit(u':', 1) + (name, tzone) = line.strip().rsplit(':', 1) except (BaseException, Exception): continue if None is name or None is tzone: @@ -512,14 +511,14 @@ def _load_network_conversions(): if url_data in (None, ''): update_last_retry() # when no url_data, trouble connecting to GitHub - logger.debug(u'Updating network conversions failed, this can happen from time to time. URL: %s' % url) + logger.debug(f'Updating network conversions failed, this can happen from time to time. URL: {url}') return reset_last_retry() try: for line in url_data.splitlines(): - (tvdb_network, tvrage_network, tvrage_country) = line.strip().rsplit(u'::', 2) + (tvdb_network, tvrage_network, tvrage_country) = line.strip().rsplit('::', 2) if not (tvdb_network and tvrage_network and tvrage_country): continue conversions_in.append( diff --git a/sickgear/notifiers/boxcar2.py b/sickgear/notifiers/boxcar2.py index c8d8f16f..b6d2ecd2 100644 --- a/sickgear/notifiers/boxcar2.py +++ b/sickgear/notifiers/boxcar2.py @@ -72,7 +72,7 @@ class Boxcar2Notifier(Notifier): except urllib.error.HTTPError as e: if not hasattr(e, 'code'): - self._log_error(u'Notification failed: %s' % ex(e)) + self._log_error(f'Notification failed: {ex(e)}') else: result = 'Notification failed. 
Error code: %s' % e.code
                 self._log_error(result)
 
@@ -91,7 +91,7 @@ class Boxcar2Notifier(Notifier):
                 result = 'Wrong data sent to Boxcar'
                 self._log_error(result)
         except urllib.error.URLError as e:
-            self._log_error(u'Notification failed: %s' % ex(e))
+            self._log_error(f'Notification failed: {ex(e)}')
 
         return self._choose((True, 'Failed to send notification: %s' % result)[bool(result)], not bool(result))
 
diff --git a/sickgear/notifiers/emailnotify.py b/sickgear/notifiers/emailnotify.py
index 68c51bbd..d4dab8f5 100644
--- a/sickgear/notifiers/emailnotify.py
+++ b/sickgear/notifiers/emailnotify.py
@@ -44,8 +44,8 @@ class EmailNotifier(Notifier):
         use_tls = 1 == sickgear.helpers.try_int(use_tls)
         login = any(user) and any(pwd)
-        self._log_debug(u'Sendmail HOST: %s; PORT: %s; LOGIN: %s, TLS: %s, USER: %s, FROM: %s, TO: %s' % (
-            host, port, login, use_tls, user, smtp_from, to))
+        self._log_debug(f'Sendmail HOST: {host}; PORT: {port};'
+                        f' LOGIN: {login}, TLS: {use_tls}, USER: {user}, FROM: {smtp_from}, TO: {to}')
 
         try:
             srv = smtplib.SMTP(host, int(port))
@@ -54,16 +54,16 @@ class EmailNotifier(Notifier):
 
             if use_tls or login:
                 srv.ehlo()
-                self._log_debug(u'Sent initial EHLO command')
+                self._log_debug('Sent initial EHLO command')
 
             if use_tls:
                 srv.starttls()
                 srv.ehlo()
-                self._log_debug(u'Sent STARTTLS and EHLO command')
+                self._log_debug('Sent STARTTLS and EHLO command')
 
             if login:
                 srv.login(user, pwd)
-                self._log_debug(u'Sent LOGIN command')
+                self._log_debug('Sent LOGIN command')
 
             srv.sendmail(smtp_from, to, msg.as_string())
             srv.quit()
@@ -101,10 +101,10 @@ class EmailNotifier(Notifier):
             show_name = body.split(' - ')[0]
             to = self._get_recipients(show_name)
             if not any(to):
-                self._log_warning(u'No email recipients to notify, skipping')
+                self._log_warning('No email recipients to notify, skipping')
                 return
-            self._log_debug(u'Email recipients to notify: %s' % to)
+            self._log_debug(f'Email recipients to notify: {to}')
 
             try:
                 msg = MIMEMultipart('alternative')
@@ -131,9 +131,9 @@ class EmailNotifier(Notifier):
             msg['Date'] = formatdate(localtime=True)
             if self._sendmail(sickgear.EMAIL_HOST, sickgear.EMAIL_PORT, sickgear.EMAIL_FROM, sickgear.EMAIL_TLS,
                               sickgear.EMAIL_USER, sickgear.EMAIL_PASSWORD, to, msg):
-                self._log_debug(u'%s notification sent to [%s] for "%s"' % (title, to, body))
+                self._log_debug(f'{title} notification sent to [{to}] for "{body}"')
             else:
-                self._log_error(u'%s notification ERROR: %s' % (title, self.last_err))
+                self._log_error(f'{title} notification ERROR: {self.last_err}')
 
     def test_notify(self, host, port, smtp_from, use_tls, user, pwd, to):
         self._testing = True
diff --git a/sickgear/notifiers/emby.py b/sickgear/notifiers/emby.py
index 81065c00..91e3e27e 100644
--- a/sickgear/notifiers/emby.py
+++ b/sickgear/notifiers/emby.py
@@ -61,7 +61,7 @@ class EmbyNotifier(Notifier):
         """
         hosts, keys, message = self._check_config()
         if not hosts:
-            self._log_warning(u'Issue with hosts or api keys, check your settings')
+            self._log_warning('Issue with hosts or api keys, check your settings')
             return False
 
         from sickgear.indexers import indexer_config
@@ -98,10 +98,10 @@ class EmbyNotifier(Notifier):
                 timeout=20, hooks=dict(response=self._cb_response), **args)
             # Emby will initiate a LibraryMonitor path refresh one minute after this success
             if self.response and 204 == self.response.get('status_code') and self.response.get('ok'):
-                self._log(u'Success: update %s sent to host %s in a library updated call' % (mode_to_log, cur_host))
+                self._log(f'Success: update {mode_to_log} sent to host {cur_host} in a
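One detail these conversions must preserve is the braces: an f-string only evaluates expressions inside {}, so f'Notification failed: ex(e)' would log the literal text while f'Notification failed: {ex(e)}' logs the exception message. A tiny self-contained check, with a stand-in for sickgear's ex() helper:

    def ex(e):
        # stand-in for the exception-to-text helper used throughout this patch
        return str(e)

    e = ValueError('boom')
    assert f'Notification failed: ex(e)' == 'Notification failed: ex(e)'
    assert f'Notification failed: {ex(e)}' == 'Notification failed: boom'
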
library updated call') continue elif self.response and 401 == self.response.get('status_code'): - self._log_warning(u'Failed to authenticate with %s' % cur_host) + self._log_warning(f'Failed to authenticate with {cur_host}') elif self.response and 404 == self.response.get('status_code'): self.response = None sickgear.helpers.get_url( @@ -109,16 +109,16 @@ class EmbyNotifier(Notifier): headers={'Content-type': 'application/json', 'X-MediaBrowser-Token': keys[i]}, timeout=20, hooks=dict(response=self._cb_response), post_json={'Path': '', 'UpdateType': ''}) if self.response and 204 == self.response.get('status_code') and self.response.get('ok'): - self._log(u'Success: fallback to sending Library/Media/Updated call' - u' to scan all shows at host %s' % cur_host) + self._log(f'Success: fallback to sending Library/Media/Updated call' + f' to scan all shows at host {cur_host}') continue - self._log_debug(u'Warning, Library update responded 404 not found and' - u' fallback to new /Library/Media/Updated api call failed at %s' % cur_host) + self._log_debug(f'Warning, Library update responded 404 not found and' + f' fallback to new /Library/Media/Updated api call failed at {cur_host}') elif not response and not self.response or not self.response.get('ok'): - self._log_warning(u'Warning, could not connect with server at %s' % cur_host) + self._log_warning(f'Warning, could not connect with server at {cur_host}') else: - self._log_debug(u'Warning, unknown response %sfrom %s, can most likely be ignored' - % (self.response and '%s ' % self.response.get('status_code') or '', cur_host)) + self._log_debug(f'Warning, unknown response %sfrom {cur_host}, can most likely be ignored' + % (self.response and '%s ' % self.response.get('status_code') or '')) total_success = False return total_success @@ -181,7 +181,7 @@ class EmbyNotifier(Notifier): if len(hosts) != len(apikeys): message = ('Not enough Api keys for hosts', 'More Api keys than hosts')[len(apikeys) > len(hosts)] - self._log_warning(u'%s, check your settings' % message) + self._log_warning(f'{message}, check your settings') return False, False, message return hosts, apikeys, 'OK' @@ -215,12 +215,12 @@ class EmbyNotifier(Notifier): if self.response and 401 == self.response.get('status_code'): success = False message += ['Fail: Cannot authenticate API key with %s' % cur_host] - self._log_warning(u'Failed to authenticate with %s' % cur_host) + self._log_warning(f'Failed to authenticate with {cur_host}') continue elif not response and not self.response or not self.response.get('ok'): success = False message += ['Fail: No supported Emby server found at %s' % cur_host] - self._log_warning(u'Warning, could not connect with server at ' + cur_host) + self._log_warning(f'Warning, could not connect with server at {cur_host}') continue message += ['OK: %s' % cur_host] diff --git a/sickgear/notifiers/generic.py b/sickgear/notifiers/generic.py index ce4d98f2..a9f14ff8 100644 --- a/sickgear/notifiers/generic.py +++ b/sickgear/notifiers/generic.py @@ -25,7 +25,7 @@ notify_strings = dict( git_updated='SickGear updated', git_updated_text='SickGear updated to commit#: ', test_title='SickGear notification test', - test_body=u'Success testing %s settings from SickGear ʕ•ᴥ•ʔ', + test_body='Success testing %s settings from SickGear ʕ•ᴥ•ʔ', ) @@ -40,7 +40,7 @@ class BaseNotifier(object): return 'https://raw.githubusercontent.com/SickGear/SickGear/main/gui/slick/images/ico/' + self.sg_logo_file def _log(self, msg, level=logger.MESSAGE): - logger.log(u'%s: %s' % (self.name, 
msg), level) + logger.log(f'{self.name}: {msg}', level) def _log_debug(self, msg): self._log(msg, logger.DEBUG) @@ -108,7 +108,7 @@ class BaseNotifier(object): @staticmethod def _body_only(title, body): # don't use title with updates or testing, as only one str is used - return body if 'SickGear' in title else u'%s: %s' % (title, body.replace('#: ', '# ')) + return body if 'SickGear' in title else f'{title}: {body.replace("#: ", "# ")}' class Notifier(BaseNotifier): @@ -136,7 +136,7 @@ class Notifier(BaseNotifier): self._pre_notify('git_updated', notify_strings['git_updated_text'] + new_version, **kwargs) def _pre_notify(self, notify_string, message, *args, **kwargs): - self._log_debug(u'Sending notification "%s"' % (self._body_only(notify_strings[notify_string], message))) + self._log_debug(f'Sending notification "{self._body_only(notify_strings[notify_string], message)}"') try: return self._notify(notify_strings[notify_string], message, *args, **kwargs) except (BaseException, Exception): diff --git a/sickgear/notifiers/growl.py b/sickgear/notifiers/growl.py index 4eaa872e..ffc51c9a 100644 --- a/sickgear/notifiers/growl.py +++ b/sickgear/notifiers/growl.py @@ -94,7 +94,7 @@ class GrowlNotifier(Notifier): success = True except (BaseException, Exception) as e: - self._log_warning(u'Unable to send growl to %s:%s - %s' % (opts['host'], opts['port'], ex(e))) + self._log_warning(f'Unable to send growl to {opts["host"]}:{opts["port"]} - {ex(e)}') return success diff --git a/sickgear/notifiers/kodi.py b/sickgear/notifiers/kodi.py index 7b652f17..5fb783e8 100644 --- a/sickgear/notifiers/kodi.py +++ b/sickgear/notifiers/kodi.py @@ -94,7 +94,7 @@ class KodiNotifier(Notifier): Returns: True if processing succeeded with no issues else False if any issues found """ if not sickgear.KODI_HOST: - self._log_warning(u'No Kodi hosts specified, check your settings') + self._log_warning('No Kodi hosts specified, check your settings') return False # either update each host, or only attempt to update until one successful result @@ -108,7 +108,7 @@ class KodiNotifier(Notifier): response = self._send_json(cur_host, dict(method='Profiles.GetCurrentProfile')) if self.response and 401 == self.response.get('status_code'): - self._log_debug(u'Failed to authenticate with %s' % cur_host) + self._log_debug(f'Failed to authenticate with {cur_host}') continue if not response: self._maybe_log_failed_detection(cur_host) @@ -117,7 +117,7 @@ class KodiNotifier(Notifier): if self._send_library_update(cur_host, show_name): only_first.update(dict(profile=response.get('label') or 'Master', host=cur_host)) self._log('Success: profile;' + - u'"%(profile)s" at%(first)s host;%(host)s updated%(show)s%(first_note)s' % only_first) + '"%(profile)s" at%(first)s host;%(host)s updated%(show)s%(first_note)s' % only_first) else: self._maybe_log_failed_detection(cur_host) result += 1 @@ -148,10 +148,10 @@ class KodiNotifier(Notifier): failed_msg = 'Single show update failed,' if sickgear.KODI_UPDATE_FULL: - self._log_debug(u'%s falling back to full update' % failed_msg) + self._log_debug(f'{failed_msg} falling back to full update') return __method_update(host) - self._log_debug(u'%s consider enabling "Perform full library update" in config/notifications' % failed_msg) + self._log_debug(f'{failed_msg} consider enabling "Perform full library update" in config/notifications') return False ############################################################################## @@ -169,7 +169,7 @@ class KodiNotifier(Notifier): """ if not host: - 
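BaseNotifier above routes every message through a single _log core plus thin _log_debug/_log_warning/_log_error wrappers, the same shape as the logger.debug/warning/error move in this patch. A rough, self-contained sketch of that pattern (stdlib logging stands in for sickgear's logger; the class name is illustrative):

    import logging

    class NotifierLogMixin(object):
        name = 'Notifier'

        def _log(self, msg, level=logging.INFO):
            # one core call site; every message is prefixed with the notifier name
            logging.getLogger('sickgear').log(level, f'{self.name}: {msg}')

        def _log_debug(self, msg):
            self._log(msg, logging.DEBUG)

        def _log_warning(self, msg):
            self._log(msg, logging.WARNING)

        def _log_error(self, msg):
            self._log(msg, logging.ERROR)
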
self._log_warning(u'No host specified, aborting update') + self._log_warning('No host specified, aborting update') return False args = {} @@ -198,14 +198,14 @@ class KodiNotifier(Notifier): """ if not host: - self._log_warning(u'No host specified, aborting update') + self._log_warning('No host specified, aborting update') return False - self._log_debug(u'Updating library via HTTP method for host: %s' % host) + self._log_debug(f'Updating library via HTTP method for host: {host}') # if we're doing per-show if show_name: - self._log_debug(u'Updating library via HTTP method for show %s' % show_name) + self._log_debug(f'Updating library via HTTP method for show {show_name}') # noinspection SqlResolve path_sql = 'SELECT path.strPath' \ @@ -223,29 +223,28 @@ class KodiNotifier(Notifier): # sql used to grab path(s) response = self._send(host, {'command': 'QueryVideoDatabase(%s)' % path_sql}) if not response: - self._log_debug(u'Invalid response for %s on %s' % (show_name, host)) + self._log_debug(f'Invalid response for {show_name} on {host}') return False try: et = etree.fromstring(quote(response, ':\\/<>')) except SyntaxError as e: - self._log_error(u'Unable to parse XML in response: %s' % ex(e)) + self._log_error(f'Unable to parse XML in response: {ex(e)}') return False paths = et.findall('.//field') if not paths: - self._log_debug(u'No valid path found for %s on %s' % (show_name, host)) + self._log_debug(f'No valid path found for {show_name} on {host}') return False for path in paths: # we do not need it double-encoded, gawd this is dumb un_enc_path = decode_str(unquote(path.text), sickgear.SYS_ENCODING) - self._log_debug(u'Updating %s on %s at %s' % (show_name, host, un_enc_path)) + self._log_debug(f'Updating {show_name} on {host} at {un_enc_path}') if not self._send( host, dict(command='ExecBuiltIn', parameter='Kodi.updatelibrary(video, %s)' % un_enc_path)): - self._log_error(u'Update of show directory failed for %s on %s at %s' - % (show_name, host, un_enc_path)) + self._log_error(f'Update of show directory failed for {show_name} on {host} at {un_enc_path}') return False # sleep for a few seconds just to be sure kodi has a chance to finish each directory @@ -253,10 +252,10 @@ class KodiNotifier(Notifier): time.sleep(5) # do a full update if requested else: - self._log_debug(u'Full library update on host: %s' % host) + self._log_debug(f'Full library update on host: {host}') if not self._send(host, dict(command='ExecBuiltIn', parameter='Kodi.updatelibrary(video)')): - self._log_error(u'Failed full library update on: %s' % host) + self._log_error(f'Failed full library update on: {host}') return False return True @@ -277,7 +276,7 @@ class KodiNotifier(Notifier): result = {} if not host: - self._log_warning(u'No host specified, aborting update') + self._log_warning('No host specified, aborting update') return result if isinstance(command, dict): @@ -300,8 +299,8 @@ class KodiNotifier(Notifier): if not response.get('error'): return 'OK' == response.get('result') and {'OK': True} or response.get('result') - self._log_error(u'API error; %s from %s in response to command: %s' - % (json_dumps(response['error']), host, json_dumps(command))) + self._log_error(f'API error; {json_dumps(response["error"])} from {host}' + f' in response to command: {json_dumps(command)}') return result def _update_json(self, host=None, show_name=None): @@ -317,12 +316,12 @@ class KodiNotifier(Notifier): """ if not host: - self._log_warning(u'No host specified, aborting update') + self._log_warning('No host specified, 
aborting update') return False # if we're doing per-show if show_name: - self._log_debug(u'JSON library update. Host: %s Show: %s' % (host, show_name)) + self._log_debug(f'JSON library update. Host: {host} Show: {show_name}') # try fetching tvshowid using show_name with a fallback to getting show list show_name = unquote_plus(show_name) @@ -339,7 +338,7 @@ class KodiNotifier(Notifier): break if not shows: - self._log_debug(u'No items in GetTVShows response') + self._log_debug('No items in GetTVShows response') return False tvshowid = -1 @@ -354,7 +353,7 @@ class KodiNotifier(Notifier): # we didn't find the show (exact match), thus revert to just doing a full update if enabled if -1 == tvshowid: - self._log_debug(u'Doesn\'t have "%s" in it\'s known shows, full library update required' % show_name) + self._log_debug(f'Doesn\'t have "{show_name}" in it\'s known shows, full library update required') return False # lookup tv-show path if we don't already know it @@ -365,24 +364,24 @@ class KodiNotifier(Notifier): path = 'tvshowdetails' in response and response['tvshowdetails'].get('file', '') or '' if not len(path): - self._log_warning(u'No valid path found for %s with ID: %s on %s' % (show_name, tvshowid, host)) + self._log_warning(f'No valid path found for {show_name} with ID: {tvshowid} on {host}') return False - self._log_debug(u'Updating %s on %s at %s' % (show_name, host, path)) + self._log_debug(f'Updating {show_name} on {host} at {path}') command = dict(method='VideoLibrary.Scan', params={'directory': '%s' % json_dumps(path)[1:-1].replace('\\\\', '\\')}) response_scan = self._send_json(host, command) if not response_scan.get('OK'): - self._log_error(u'Update of show directory failed for %s on %s at %s response: %s' % - (show_name, host, path, response_scan)) + self._log_error(f'Update of show directory failed for {show_name} on {host} at {path}' + f' response: {response_scan}') return False # do a full update if requested else: - self._log_debug(u'Full library update on host: %s' % host) + self._log_debug(f'Full library update on host: {host}') response_scan = self._send_json(host, dict(method='VideoLibrary.Scan')) if not response_scan.get('OK'): - self._log_error(u'Failed full library update on: %s response: %s' % (host, response_scan)) + self._log_error(f'Failed full library update on: {host} response: {response_scan}') return False return True @@ -400,7 +399,7 @@ class KodiNotifier(Notifier): def _maybe_log_failed_detection(self, host, msg='connect to'): - self._maybe_log(u'Failed to %s %s, check device(s) and config' % (msg, host), logger.ERROR) + self._maybe_log(f'Failed to {msg} {host}, check device(s) and config', logger.ERROR) def _notify(self, title, body, hosts=None, username=None, password=None, **kwargs): """ Internal wrapper for the notify_snatch and notify_download functions @@ -429,20 +428,20 @@ class KodiNotifier(Notifier): if self.response and 401 == self.response.get('status_code'): success = False message += ['Fail: Cannot authenticate with %s' % cur_host] - self._log_debug(u'Failed to authenticate with %s' % cur_host) + self._log_debug(f'Failed to authenticate with {cur_host}') elif not api_version: success = False message += ['Fail: No supported Kodi found at %s' % cur_host] self._maybe_log_failed_detection(cur_host, 'connect and detect version for') else: if 4 >= api_version: - self._log_debug(u'Detected %sversion <= 11, using HTTP API' - % self.prefix and ' ' + self.prefix.capitalize()) + self._log_debug(f'Detected {self.prefix and " " + 
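For orientation, the _send_json calls above wrap a Kodi JSON-RPC exchange along the lines of the following sketch; the host value is made up and error handling is pared down to the minimum:

    import json
    import urllib.request

    def kodi_jsonrpc(host, method, params=None):
        # POST a JSON-RPC 2.0 payload to Kodi's /jsonrpc endpoint
        payload = json.dumps(dict(jsonrpc='2.0', method=method, params=params or {}, id=1))
        req = urllib.request.Request(
            f'http://{host}/jsonrpc', data=payload.encode('utf-8'),
            headers={'Content-Type': 'application/json'})
        with urllib.request.urlopen(req, timeout=20) as resp:
            return json.loads(resp.read())

    # e.g. the per-show directory scan issued above:
    # kodi_jsonrpc('192.168.1.2:8080', 'VideoLibrary.Scan', {'directory': '/media/tv/Show/'})
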
self.prefix.capitalize()}version <= 11,' + f' using HTTP API') __method_send = self._send command = dict(command='ExecBuiltIn', parameter='Notification(%s,%s)' % (title, body)) else: - self._log_debug(u'Detected version >= 12, using JSON API') + self._log_debug('Detected version >= 12, using JSON API') __method_send = self._send_json command = dict(method='GUI.ShowNotification', params=dict( [('title', title), ('message', body), ('image', self._sg_logo_url)] diff --git a/sickgear/notifiers/libnotify.py b/sickgear/notifiers/libnotify.py index c4a82ffb..9dc47547 100644 --- a/sickgear/notifiers/libnotify.py +++ b/sickgear/notifiers/libnotify.py @@ -44,14 +44,14 @@ def diagnose(): try: bus = dbus.SessionBus() except dbus.DBusException as e: - return (u'Error: unable to connect to D-Bus session bus: %s. ' - u'Are you running SickGear in a desktop session?') % (cgi.escape(e),) + return (f'Error: unable to connect to D-Bus session bus: {cgi.escape(e)}.' + f' Are you running SickGear in a desktop session?') try: bus.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications') except dbus.DBusException as e: - return (u'Error: there doesn\'t seem to be a notification daemon available: %s ' - u'Try installing notification-daemon or notify-osd.') % (cgi.escape(e),) + return (f'Error: there doesn\'t seem to be a notification daemon available: {cgi.escape(e)}.' + f' Try installing notification-daemon or notify-osd.') return 'Error: Unable to send notification.' @@ -71,18 +71,18 @@ class LibnotifyNotifier(Notifier): # noinspection PyPackageRequirements import pynotify except ImportError: - self._log_error(u'Unable to import pynotify. libnotify notifications won\'t work') + self._log_error("Unable to import pynotify. libnotify notifications won't work") return False try: # noinspection PyPackageRequirements from gi.repository import GObject except ImportError: - self._log_error(u'Unable to import GObject from gi.repository. Cannot catch a GError in display') + self._log_error('Unable to import GObject from gi.repository. Cannot catch a GError in display') return False if not pynotify.init('SickGear'): - self._log_error(u'Initialization of pynotify failed. libnotify notifications won\'t work') + self._log_error('Initialization of pynotify failed. libnotify notifications won\'t work') return False self.pynotify = pynotify diff --git a/sickgear/notifiers/nmj.py b/sickgear/notifiers/nmj.py index 03be6551..fef80f01 100644 --- a/sickgear/notifiers/nmj.py +++ b/sickgear/notifiers/nmj.py @@ -43,11 +43,11 @@ class NMJNotifier(BaseNotifier): try: terminal = telnetlib.Telnet(host) except (BaseException, Exception): - self._log_warning(u'Unable to get a telnet session to %s' % host) + self._log_warning(f'Unable to get a telnet session to {host}') if result: # tell the terminal to output the necessary info to the screen so we can search it later - self._log_debug(u'Connected to %s via telnet' % host) + self._log_debug(f'Connected to {host} via telnet') terminal.read_until('sh-3.00# ') terminal.write('cat /tmp/source\n') terminal.write('cat /tmp/netshare\n') @@ -57,11 +57,11 @@ class NMJNotifier(BaseNotifier): match = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', tnoutput) # if we found the database in the terminal output then save that database to the config if not match: - self._log_warning(u'Could not get current NMJ database on %s, NMJ is probably not running!' 
% host) + self._log_warning(f'Could not get current NMJ database on {host}, NMJ is probably not running!') else: database = match.group(1) device = match.group(2) - self._log_debug(u'Found NMJ database %s on device %s' % (database, device)) + self._log_debug(f'Found NMJ database {database} on device {device}') sickgear.NMJ_DATABASE = database # if the device is a remote host then try to parse the mounting URL and save it to the config if device.startswith('NETWORK_SHARE/'): @@ -72,7 +72,7 @@ class NMJNotifier(BaseNotifier): 'but could not get the mounting url') else: mount = match.group().replace('127.0.0.1', host) - self._log_debug(u'Found mounting url on the Popcorn Hour in configuration: %s' % mount) + self._log_debug(f'Found mounting url on the Popcorn Hour in configuration: {mount}') sickgear.NMJ_MOUNT = mount result = True @@ -96,23 +96,23 @@ class NMJNotifier(BaseNotifier): database = self._choose(database, sickgear.NMJ_DATABASE) mount = self._choose(mount, sickgear.NMJ_MOUNT) - self._log_debug(u'Sending scan command for NMJ ') + self._log_debug('Sending scan command for NMJ') # if a mount URL is provided then attempt to open a handle to that URL if mount: try: req = urllib.request.Request(mount) - self._log_debug(u'Try to mount network drive via url: %s' % mount) + self._log_debug(f'Try to mount network drive via url: {mount}') http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager http_response_obj.close() except IOError as e: if hasattr(e, 'reason'): - self._log_warning(u'Could not contact Popcorn Hour on host %s: %s' % (host, e.reason)) + self._log_warning(f'Could not contact Popcorn Hour on host {host}: {e.reason}') elif hasattr(e, 'code'): - self._log_warning(u'Problem with Popcorn Hour on host %s: %s' % (host, e.code)) + self._log_warning(f'Problem with Popcorn Hour on host {host}: {e.code}') return False except (BaseException, Exception) as e: - self._log_error(u'Unknown exception: ' + ex(e)) + self._log_error(f'Unknown exception: {ex(e)}') return False # build up the request URL and parameters @@ -123,18 +123,18 @@ class NMJNotifier(BaseNotifier): # send the request to the server try: req = urllib.request.Request(update_url) - self._log_debug(u'Sending scan update command via url: %s' % update_url) + self._log_debug(f'Sending scan update command via url: {update_url}') http_response_obj = urllib.request.urlopen(req) response = http_response_obj.read() http_response_obj.close() except IOError as e: if hasattr(e, 'reason'): - self._log_warning(u'Could not contact Popcorn Hour on host %s: %s' % (host, e.reason)) + self._log_warning(f'Could not contact Popcorn Hour on host {host}: {e.reason}') elif hasattr(e, 'code'): - self._log_warning(u'Problem with Popcorn Hour on host %s: %s' % (host, e.code)) + self._log_warning(f'Problem with Popcorn Hour on host {host}: {e.code}') return False except (BaseException, Exception) as e: - self._log_error(u'Unknown exception: ' + ex(e)) + self._log_error(f'Unknown exception: {ex(e)}') return False # try to parse the resulting XML @@ -142,15 +142,15 @@ class NMJNotifier(BaseNotifier): et = etree.fromstring(response) result = et.findtext('returnValue') except SyntaxError as e: - self._log_error(u'Unable to parse XML returned from the Popcorn Hour: %s' % ex(e)) + self._log_error(f'Unable to parse XML returned from the Popcorn Hour: {ex(e)}') return False - # if the result was a number then consider that an error + # if the result was a number, then consider that an error if 0 < int(result): - 
self._log_error(u'Popcorn Hour returned an errorcode: %s' % result) + self._log_error(f'Popcorn Hour returned an errorcode: {result}') return False - self._log(u'NMJ started background scan') + self._log('NMJ started background scan') return True def _notify(self, host=None, database=None, mount=None, **kwargs): diff --git a/sickgear/notifiers/nmjv2.py b/sickgear/notifiers/nmjv2.py index 8303bae9..654d69a1 100644 --- a/sickgear/notifiers/nmjv2.py +++ b/sickgear/notifiers/nmjv2.py @@ -78,7 +78,7 @@ class NMJv2Notifier(BaseNotifier): result = True except IOError as e: - self._log_warning(u'Couldn\'t contact popcorn hour on host %s: %s' % (host, ex(e))) + self._log_warning(f'Couldn\'t contact popcorn hour on host {host}: {ex(e)}') if result: return '{"message": "Success, NMJ Database found at: %(host)s", "database": "%(database)s"}' % { @@ -100,7 +100,7 @@ class NMJv2Notifier(BaseNotifier): host = self._choose(host, sickgear.NMJv2_HOST) - self._log_debug(u'Sending scan command for NMJ ') + self._log_debug('Sending scan command for NMJ ') # if a host is provided then attempt to open a handle to that URL try: @@ -108,11 +108,11 @@ class NMJv2Notifier(BaseNotifier): url_scandir = '%s%s%s' % (base_url, 'metadata_database?', urlencode( dict(arg0='update_scandir', arg1=sickgear.NMJv2_DATABASE, arg2='', arg3='update_all'))) - self._log_debug(u'Scan update command sent to host: %s' % host) + self._log_debug(f'Scan update command sent to host: {host}') url_updatedb = '%s%s%s' % (base_url, 'metadata_database?', urlencode( dict(arg0='scanner_start', arg1=sickgear.NMJv2_DATABASE, arg2='background', arg3=''))) - self._log_debug(u'Try to mount network drive via url: %s' % host) + self._log_debug(f'Try to mount network drive via url: {host}') prereq = urllib.request.Request(url_scandir) req = urllib.request.Request(url_updatedb) @@ -127,24 +127,24 @@ class NMJv2Notifier(BaseNotifier): response2 = http_response_obj2.read() http_response_obj2.close() except IOError as e: - self._log_warning(u'Couldn\'t contact popcorn hour on host %s: %s' % (host, ex(e))) + self._log_warning(f'Couldn\'t contact popcorn hour on host {host}: {ex(e)}') return False try: et = etree.fromstring(response1) result1 = et.findtext('returnValue') except SyntaxError as e: - self._log_error(u'Unable to parse XML returned from the Popcorn Hour: update_scandir, %s' % ex(e)) + self._log_error(f'Unable to parse XML returned from the Popcorn Hour: update_scandir, {ex(e)}') return False try: et = etree.fromstring(response2) result2 = et.findtext('returnValue') except SyntaxError as e: - self._log_error(u'Unable to parse XML returned from the Popcorn Hour: scanner_start, %s' % ex(e)) + self._log_error(f'Unable to parse XML returned from the Popcorn Hour: scanner_start, {ex(e)}') return False - # if the result was a number then consider that an error + # if the result was a number, then consider that an error error_codes = ['8', '11', '22', '49', '50', '51', '60'] error_messages = ['Invalid parameter(s)/argument(s)', 'Invalid database path', @@ -155,15 +155,15 @@ class NMJv2Notifier(BaseNotifier): 'Read only file system'] if 0 < int(result1): index = error_codes.index(result1) - self._log_error(u'Popcorn Hour returned an error: %s' % (error_messages[index])) + self._log_error(f'Popcorn Hour returned an error: {error_messages[index]}') return False elif 0 < int(result2): index = error_codes.index(result2) - self._log_error(u'Popcorn Hour returned an error: %s' % (error_messages[index])) + self._log_error(f'Popcorn Hour returned an error: 
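The parallel error_codes/error_messages lists above pair up by position and rely on list.index(), which raises ValueError for any code not in the list; the same mapping expressed as a dict makes the lookup total. A sketch of the equivalent (codes and texts copied from the lists above):

    NMJV2_ERRORS = {
        '8': 'Invalid parameter(s)/argument(s)',
        '11': 'Invalid database path',
        '22': 'Insufficient size',
        '49': 'Database write error',
        '50': 'Database read error',
        '51': 'Open fifo pipe failed',
        '60': 'Read only file system',
    }

    def describe_nmjv2_error(code):
        # unknown codes degrade to a generic message instead of raising
        return NMJV2_ERRORS.get(code, f'Unknown return value: {code}')
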
{error_messages[index]}') return False - self._log(u'NMJv2 started background scan') + self._log('NMJv2 started background scan') return True def _notify(self, host=None, **kwargs): diff --git a/sickgear/notifiers/plex.py b/sickgear/notifiers/plex.py index b84c7d89..38e3417a 100644 --- a/sickgear/notifiers/plex.py +++ b/sickgear/notifiers/plex.py @@ -45,33 +45,33 @@ class PLEXNotifier(Notifier): """ if not host: - self._log_error(u'No host specified, check your settings') + self._log_error('No host specified, check your settings') return False for key in command: command[key] = command[key].encode('utf-8') enc_command = urlencode(command) - self._log_debug(u'Encoded API command: ' + enc_command) + self._log_debug(f'Encoded API command: {enc_command}') url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command) try: req = urllib.request.Request(url) if password: req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) - self._log_debug(u'Contacting (with auth header) via url: ' + url) + self._log_debug(f'Contacting (with auth header) via url: {url}') else: - self._log_debug(u'Contacting via url: ' + url) + self._log_debug(f'Contacting via url: {url}') http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager result = decode_str(http_response_obj.read(), sickgear.SYS_ENCODING) http_response_obj.close() - self._log_debug(u'HTTP response: ' + result.replace('\n', '')) + self._log_debug('HTTP response: ' + result.replace('\n', '')) return True except (urllib.error.URLError, IOError) as e: - self._log_warning(u'Couldn\'t contact Plex at ' + url + ' ' + ex(e)) + self._log_warning(f'Couldn\'t contact Plex at {url} {ex(e)}') return False @staticmethod @@ -113,7 +113,7 @@ class PLEXNotifier(Notifier): results = [] for cur_host in [x.strip() for x in host.split(',')]: cur_host = unquote_plus(cur_host) - self._log(u'Sending notification to \'%s\'' % cur_host) + self._log(f'Sending notification to \'{cur_host}\'') result = self._send_to_plex(command, cur_host, username, password) results += [self._choose(('%s Plex client ... %s' % (('Successful test notice sent to', 'Failed test for')[not result], cur_host)), result)] @@ -148,7 +148,7 @@ class PLEXNotifier(Notifier): """ host = self._choose(host, sickgear.PLEX_SERVER_HOST) if not host: - msg = u'No Plex Media Server host specified, check your settings' + msg = 'No Plex Media Server host specified, check your settings' self._log_debug(msg) return '%sFail: %s' % (('', '
')[self._testing], msg) @@ -159,7 +159,7 @@ class PLEXNotifier(Notifier): token_arg = None if username and password: - self._log_debug(u'Fetching plex.tv credentials for user: ' + username) + self._log_debug('Fetching plex.tv credentials for user: ' + username) req = urllib.request.Request('https://plex.tv/users/sign_in.xml', data=b'') req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) req.add_header('X-Plex-Device-Name', 'SickGear') @@ -176,10 +176,10 @@ class PLEXNotifier(Notifier): token_arg = '?X-Plex-Token=' + token except urllib.error.URLError as e: - self._log(u'Error fetching credentials from plex.tv for user %s: %s' % (username, ex(e))) + self._log(f'Error fetching credentials from plex.tv for user {username}: {ex(e)}') except (ValueError, IndexError) as e: - self._log(u'Error parsing plex.tv response: ' + ex(e)) + self._log('Error parsing plex.tv response: ' + ex(e)) file_location = location if None is not location else '' if None is ep_obj else ep_obj.location host_validate = self._get_host_list(host, all([token_arg])) @@ -198,7 +198,7 @@ class PLEXNotifier(Notifier): sections = response.findall('.//Directory') if not sections: - self._log(u'Plex Media Server not running on: ' + cur_host) + self._log('Plex Media Server not running on: ' + cur_host) hosts_failed.append(cur_host) continue @@ -232,17 +232,17 @@ class PLEXNotifier(Notifier): host_list.append(cur_host) else: hosts_failed.append(cur_host) - self._log_error(u'Error updating library section for Plex Media Server: %s' % cur_host) + self._log_error(f'Error updating library section for Plex Media Server: {cur_host}') if len(hosts_failed) == len(host_validate): - self._log(u'No successful Plex host updated') + self._log('No successful Plex host updated') return 'Fail no successful Plex host updated: %s' % ', '.join([host for host in hosts_failed]) else: hosts = ', '.join(set(host_list)) if len(hosts_match): - self._log(u'Hosts updating where TV section paths match the downloaded show: %s' % hosts) + self._log(f'Hosts updating where TV section paths match the downloaded show: {hosts}') else: - self._log(u'Updating all hosts with TV sections: %s' % hosts) + self._log(f'Updating all hosts with TV sections: {hosts}') return '' hosts = [ diff --git a/sickgear/notifiers/prowl.py b/sickgear/notifiers/prowl.py index 876d66c0..c7b6a92b 100644 --- a/sickgear/notifiers/prowl.py +++ b/sickgear/notifiers/prowl.py @@ -52,7 +52,7 @@ class ProwlNotifier(Notifier): if 200 != response.status: if 401 == response.status: - result = u'Authentication, %s (bad API key?)' % response.reason + result = f'Authentication, {response.reason} (bad API key?)' else: result = 'Http response code "%s"' % response.status diff --git a/sickgear/notifiers/pushalot.py b/sickgear/notifiers/pushalot.py index 50772f09..3b28526e 100644 --- a/sickgear/notifiers/pushalot.py +++ b/sickgear/notifiers/pushalot.py @@ -30,7 +30,7 @@ class PushalotNotifier(Notifier): pushalot_auth_token = self._choose(pushalot_auth_token, sickgear.PUSHALOT_AUTHORIZATIONTOKEN) - self._log_debug(u'Title: %s, Message: %s, API: %s' % (title, body, pushalot_auth_token)) + self._log_debug(f'Title: {title}, Message: {body}, API: {pushalot_auth_token}') http_handler = moves.http_client.HTTPSConnection('pushalot.com') @@ -49,7 +49,7 @@ class PushalotNotifier(Notifier): if 200 != response.status: if 410 == response.status: - result = u'Authentication, %s (bad API key?)' % response.reason + result = f'Authentication, {response.reason} (bad API key?)' else: 
result = 'Http response code "%s"' % response.status diff --git a/sickgear/notifiers/pushbullet.py b/sickgear/notifiers/pushbullet.py index 1057bc68..bdda0bc5 100644 --- a/sickgear/notifiers/pushbullet.py +++ b/sickgear/notifiers/pushbullet.py @@ -69,7 +69,7 @@ class PushbulletNotifier(Notifier): result = resp.json()['error']['message'] except (BaseException, Exception): result = 'no response' - self._log_warning(u'%s' % result) + self._log_warning(f'{result}') return self._choose((True, 'Failed to send notification: %s' % result)[bool(result)], not bool(result)) diff --git a/sickgear/notifiers/pytivo.py b/sickgear/notifiers/pytivo.py index b05dddaa..a380a347 100644 --- a/sickgear/notifiers/pytivo.py +++ b/sickgear/notifiers/pytivo.py @@ -66,7 +66,7 @@ class PyTivoNotifier(BaseNotifier): request_url = 'http://%s/TiVoConnect?%s' % (host, urlencode( dict(Command='Push', Container=container, File=file_path, tsn=tsn))) - self._log_debug(u'Requesting ' + request_url) + self._log_debug(f'Requesting {request_url}') request = urllib.request.Request(request_url) @@ -76,17 +76,17 @@ class PyTivoNotifier(BaseNotifier): except urllib.error.HTTPError as e: if hasattr(e, 'reason'): - self._log_error(u'Error, failed to reach a server - ' + e.reason) + self._log_error('Error, failed to reach a server - ' + e.reason) return False elif hasattr(e, 'code'): - self._log_error(u'Error, the server couldn\'t fulfill the request - ' + e.code) + self._log_error('Error, the server couldn\'t fulfill the request - ' + e.code) return False except (BaseException, Exception) as e: - self._log_error(u'Unknown exception: ' + ex(e)) + self._log_error(f'Unknown exception: {ex(e)}') return False - self._log(u'Successfully requested transfer of file') + self._log('Successfully requested transfer of file') return True diff --git a/sickgear/notifiers/synoindex.py b/sickgear/notifiers/synoindex.py index 6e4bd5a9..32f6e089 100644 --- a/sickgear/notifiers/synoindex.py +++ b/sickgear/notifiers/synoindex.py @@ -32,11 +32,11 @@ class SynoIndexNotifier(BaseNotifier): self._move_object(old_file, new_file) def _cmdline_run(self, synoindex_cmd): - self._log_debug(u'Executing command ' + str(synoindex_cmd)) - self._log_debug(u'Absolute path to command: ' + os.path.abspath(synoindex_cmd[0])) + self._log_debug(f'Executing command {str(synoindex_cmd)}') + self._log_debug(f'Absolute path to command: {os.path.abspath(synoindex_cmd[0])}') try: output, err, exit_status = cmdline_runner(synoindex_cmd) - self._log_debug(u'Script result: %s' % output) + self._log_debug(f'Script result: {output}') except (BaseException, Exception) as e: self._log_error('Unable to run synoindex: %s' % ex(e)) diff --git a/sickgear/notifiers/synologynotifier.py b/sickgear/notifiers/synologynotifier.py index 1aacb2f8..51242a04 100644 --- a/sickgear/notifiers/synologynotifier.py +++ b/sickgear/notifiers/synologynotifier.py @@ -27,11 +27,11 @@ class SynologyNotifier(Notifier): def _notify(self, title, body, **kwargs): synodsmnotify_cmd = ['/usr/syno/bin/synodsmnotify', '@administrators', title, body] - self._log(u'Executing command ' + str(synodsmnotify_cmd)) - self._log_debug(u'Absolute path to command: ' + os.path.abspath(synodsmnotify_cmd[0])) + self._log(f'Executing command {synodsmnotify_cmd}') + self._log_debug(f'Absolute path to command: {os.path.abspath(synodsmnotify_cmd[0])}') try: output, err, exit_status = cmdline_runner(synodsmnotify_cmd) - self._log_debug(u'Script result: %s' % output) + self._log_debug(f'Script result: {output}') except (BaseException, 
Exception) as e: self._log('Unable to run synodsmnotify: %s' % ex(e)) diff --git a/sickgear/notifiers/telegram.py b/sickgear/notifiers/telegram.py index f528864e..96d86319 100644 --- a/sickgear/notifiers/telegram.py +++ b/sickgear/notifiers/telegram.py @@ -40,8 +40,8 @@ class TelegramNotifier(Notifier): access_token = self._choose(access_token, sickgear.TELEGRAM_ACCESS_TOKEN) cid = self._choose(chatid, sickgear.TELEGRAM_CHATID) try: - msg = self._body_only(('' if not title else u'%s' % title), body) - msg = msg.replace(u'%s: ' % title, u'%s:\r\n' % ('SickGear ' + title, title)[use_icon]) + msg = self._body_only(('' if not title else f'{title}'), body) + msg = msg.replace(f'{title}: ', f'{("SickGear " + title, title)[use_icon]}:\r\n') # HTML spaces ( ) and tabs ( ) aren't supported # See https://core.telegram.org/bots/api#html-style msg = re.sub('(?i) ?', ' ', msg) diff --git a/sickgear/notifiers/xbmc.py b/sickgear/notifiers/xbmc.py index 67b0412e..8dcae9ce 100644 --- a/sickgear/notifiers/xbmc.py +++ b/sickgear/notifiers/xbmc.py @@ -102,26 +102,26 @@ class XBMCNotifier(Notifier): """ - self._log(u'Sending request to update library for host: "%s"' % host) + self._log(f'Sending request to update library for host: "{host}"') xbmcapi = self._get_xbmc_version(host, sickgear.XBMC_USERNAME, sickgear.XBMC_PASSWORD) if xbmcapi: if 4 >= xbmcapi: # try to update for just the show, if it fails, do full update if enabled if not self._update_library_http(host, show_name) and sickgear.XBMC_UPDATE_FULL: - self._log_warning(u'Single show update failed, falling back to full update') + self._log_warning('Single show update failed, falling back to full update') return self._update_library_http(host) else: return True else: # try to update for just the show, if it fails, do full update if enabled if not self._update_library_json(host, show_name) and sickgear.XBMC_UPDATE_FULL: - self._log_warning(u'Single show update failed, falling back to full update') + self._log_warning('Single show update failed, falling back to full update') return self._update_library_json(host) else: return True - self._log_debug(u'Failed to detect version for "%s", check configuration and try again' % host) + self._log_debug(f'Failed to detect version for "{host}", check configuration and try again') return False # ############################################################################# @@ -142,7 +142,7 @@ class XBMCNotifier(Notifier): """ if not host: - self._log_debug(u'No host passed, aborting update') + self._log_debug('No host passed, aborting update') return False username = self._choose(username, sickgear.XBMC_USERNAME) @@ -152,7 +152,7 @@ class XBMCNotifier(Notifier): command[key] = command[key].encode('utf-8') enc_command = urlencode(command) - self._log_debug(u'Encoded API command: ' + enc_command) + self._log_debug('Encoded API command: ' + enc_command) url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command) try: @@ -160,19 +160,19 @@ class XBMCNotifier(Notifier): # if we have a password, use authentication if password: req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) - self._log_debug(u'Contacting (with auth header) via url: ' + url) + self._log_debug(f'Contacting (with auth header) via url: {url}') else: - self._log_debug(u'Contacting via url: ' + url) + self._log_debug(f'Contacting via url: {url}') http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager result = decode_str(http_response_obj.read(), sickgear.SYS_ENCODING) 
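The legacy HTTP API used by _send_to_xbmc above amounts to an authenticated GET of /xbmcCmds/xbmcHttp with the command dict urlencoded into the query string. A compact sketch under those assumptions (host and credentials are placeholders; b64encodestring is approximated with base64):

    import base64
    import urllib.parse
    import urllib.request

    def xbmc_http_command(host, command, username=None, password=None):
        url = f'http://{host}/xbmcCmds/xbmcHttp/?{urllib.parse.urlencode(command)}'
        req = urllib.request.Request(url)
        if password:
            token = base64.b64encode(f'{username}:{password}'.encode('utf-8')).decode('ascii')
            req.add_header('Authorization', f'Basic {token}')
        with urllib.request.urlopen(req, timeout=20) as resp:
            return resp.read().decode('utf-8', 'replace')

    # e.g. a notification on a pre-v12 client:
    # xbmc_http_command('192.168.1.2:8080',
    #                   dict(command='ExecBuiltIn', parameter='Notification(SickGear,Done)'))
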
http_response_obj.close() - self._log_debug(u'HTTP response: ' + result.replace('\n', '')) + self._log_debug('HTTP response: ' + result.replace('\n', '')) return result except (urllib.error.URLError, IOError) as e: - self._log_warning(u'Couldn\'t contact HTTP at %s %s' % (url, ex(e))) + self._log_warning(f'Couldn\'t contact HTTP at {url} {ex(e)}') return False def _update_library_http(self, host=None, show_name=None): @@ -191,14 +191,14 @@ class XBMCNotifier(Notifier): """ if not host: - self._log_debug(u'No host passed, aborting update') + self._log_debug('No host passed, aborting update') return False - self._log_debug(u'Updating XMBC library via HTTP method for host: ' + host) + self._log_debug('Updating XMBC library via HTTP method for host: ' + host) # if we're doing per-show if show_name: - self._log_debug(u'Updating library via HTTP method for show ' + show_name) + self._log_debug('Updating library via HTTP method for show ' + show_name) # noinspection SqlResolve path_sql = 'select path.strPath' \ @@ -224,30 +224,30 @@ class XBMCNotifier(Notifier): self._send_to_xbmc(reset_command, host) if not sql_xml: - self._log_debug(u'Invalid response for ' + show_name + ' on ' + host) + self._log_debug('Invalid response for ' + show_name + ' on ' + host) return False enc_sql_xml = quote(sql_xml, ':\\/<>') try: et = etree.fromstring(enc_sql_xml) except SyntaxError as e: - self._log_error(u'Unable to parse XML response: ' + ex(e)) + self._log_error(f'Unable to parse XML response: {ex(e)}') return False paths = et.findall('.//field') if not paths: - self._log_debug(u'No valid paths found for ' + show_name + ' on ' + host) + self._log_debug('No valid paths found for ' + show_name + ' on ' + host) return False for path in paths: # we do not need it double-encoded, gawd this is dumb un_enc_path = decode_str(unquote(path.text), sickgear.SYS_ENCODING) - self._log_debug(u'Updating ' + show_name + ' on ' + host + ' at ' + un_enc_path) + self._log_debug('Updating ' + show_name + ' on ' + host + ' at ' + un_enc_path) update_command = dict(command='ExecBuiltIn', parameter='XBMC.updatelibrary(video, %s)' % un_enc_path) request = self._send_to_xbmc(update_command, host) if not request: - self._log_error(u'Update of show directory failed on ' + show_name + self._log_error('Update of show directory failed on ' + show_name + ' on ' + host + ' at ' + un_enc_path) return False # sleep for a few seconds just to be sure xbmc has a chance to finish each directory @@ -255,12 +255,12 @@ class XBMCNotifier(Notifier): time.sleep(5) # do a full update if requested else: - self._log(u'Doing full library update on host: ' + host) + self._log('Doing full library update on host: ' + host) update_command = {'command': 'ExecBuiltIn', 'parameter': 'XBMC.updatelibrary(video)'} request = self._send_to_xbmc(update_command, host) if not request: - self._log_error(u'Full Library update failed on: ' + host) + self._log_error('Full Library update failed on: ' + host) return False return True @@ -284,14 +284,14 @@ class XBMCNotifier(Notifier): """ if not host: - self._log_debug(u'No host passed, aborting update') + self._log_debug('No host passed, aborting update') return False username = self._choose(username, sickgear.XBMC_USERNAME) password = self._choose(password, sickgear.XBMC_PASSWORD) command = command.encode('utf-8') - self._log_debug(u'JSON command: ' + command) + self._log_debug('JSON command: ' + command) url = 'http://%s/jsonrpc' % host try: @@ -300,28 +300,28 @@ class XBMCNotifier(Notifier): # if we have a password, use 
authentication if password: req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password))) - self._log_debug(u'Contacting (with auth header) via url: ' + url) + self._log_debug(f'Contacting (with auth header) via url: {url}') else: - self._log_debug(u'Contacting via url: ' + url) + self._log_debug(f'Contacting via url: {url}') try: http_response_obj = urllib.request.urlopen(req) # PY2 http_response_obj has no `with` context manager except urllib.error.URLError as e: - self._log_warning(u'Error while trying to retrieve API version for "%s": %s' % (host, ex(e))) + self._log_warning(f'Error while trying to retrieve API version for "{host}": {ex(e)}') return False # parse the json result try: result = json_load(http_response_obj) http_response_obj.close() - self._log_debug(u'JSON response: ' + str(result)) + self._log_debug(f'JSON response: {result}') return result # need to return response for parsing except ValueError: - self._log_warning(u'Unable to decode JSON: ' + http_response_obj) + self._log_warning('Unable to decode JSON: ' + http_response_obj) return False except IOError as e: - self._log_warning(u'Couldn\'t contact JSON API at ' + url + ' ' + ex(e)) + self._log_warning(f'Couldn\'t contact JSON API at {url} {ex(e)}') return False def _update_library_json(self, host=None, show_name=None): @@ -340,15 +340,15 @@ class XBMCNotifier(Notifier): """ if not host: - self._log_debug(u'No host passed, aborting update') + self._log_debug('No host passed, aborting update') return False - self._log(u'Updating XMBC library via JSON method for host: ' + host) + self._log('Updating XMBC library via JSON method for host: ' + host) # if we're doing per-show if show_name: tvshowid = -1 - self._log_debug(u'Updating library via JSON method for show ' + show_name) + self._log_debug('Updating library via JSON method for show ' + show_name) # get tvshowid by showName shows_command = '{"jsonrpc":"2.0","method":"VideoLibrary.GetTVShows","id":1}' @@ -357,7 +357,7 @@ class XBMCNotifier(Notifier): if shows_response and 'result' in shows_response and 'tvshows' in shows_response['result']: shows = shows_response['result']['tvshows'] else: - self._log_debug(u'No tvshows in TV show list') + self._log_debug('No tvshows in TV show list') return False for show in shows: @@ -370,7 +370,7 @@ class XBMCNotifier(Notifier): # we didn't find the show (exact match), thus revert to just doing a full update if enabled if -1 == tvshowid: - self._log_debug(u'Exact show name not matched in TV show list') + self._log_debug('Exact show name not matched in TV show list') return False # lookup tv-show path @@ -379,19 +379,19 @@ class XBMCNotifier(Notifier): path_response = self._send_to_xbmc_json(path_command, host) path = path_response['result']['tvshowdetails']['file'] - self._log_debug(u'Received Show: ' + show_name + ' with ID: ' + str(tvshowid) + ' Path: ' + path) + self._log_debug('Received Show: ' + show_name + ' with ID: ' + str(tvshowid) + ' Path: ' + path) if 1 > len(path): - self._log_warning(u'No valid path found for ' + show_name + ' with ID: ' + self._log_warning('No valid path found for ' + show_name + ' with ID: ' + str(tvshowid) + ' on ' + host) return False - self._log_debug(u'Updating ' + show_name + ' on ' + host + ' at ' + path) + self._log_debug('Updating ' + show_name + ' on ' + host + ' at ' + path) update_command = '{"jsonrpc":"2.0","method":"VideoLibrary.Scan","params":{"directory":%s},"id":1}' % ( json_dumps(path)) request = self._send_to_xbmc_json(update_command, host) if not 
request: - self._log_error(u'Update of show directory failed on ' + show_name + ' on ' + host + ' at ' + path) + self._log_error('Update of show directory failed on ' + show_name + ' on ' + host + ' at ' + path) return False # catch if there was an error in the returned request @@ -399,18 +399,18 @@ class XBMCNotifier(Notifier): for r in request: if 'error' in r: self._log_error( - u'Error while attempting to update show directory for ' + show_name + 'Error while attempting to update show directory for ' + show_name + ' on ' + host + ' at ' + path) return False # do a full update if requested else: - self._log(u'Doing Full Library update on host: ' + host) + self._log('Doing Full Library update on host: ' + host) update_command = '{"jsonrpc":"2.0","method":"VideoLibrary.Scan","id":1}' request = self._send_to_xbmc_json(update_command, host, sickgear.XBMC_USERNAME, sickgear.XBMC_PASSWORD) if not request: - self._log_error(u'Full Library update failed on: ' + host) + self._log_error('Full Library update failed on: ' + host) return False return True @@ -441,12 +441,12 @@ class XBMCNotifier(Notifier): for cur_host in [x.strip() for x in hosts.split(',')]: cur_host = unquote_plus(cur_host) - self._log(u'Sending notification to "%s"' % cur_host) + self._log(f'Sending notification to "{cur_host}"') xbmcapi = self._get_xbmc_version(cur_host, username, password) if xbmcapi: if 4 >= xbmcapi: - self._log_debug(u'Detected version <= 11, using HTTP API') + self._log_debug('Detected version <= 11, using HTTP API') command = dict(command='ExecBuiltIn', parameter='Notification(' + title.encode('utf-8') + ',' + body.encode('utf-8') + ')') notify_result = self._send_to_xbmc(command, cur_host, username, password) @@ -454,7 +454,7 @@ class XBMCNotifier(Notifier): result += [cur_host + ':' + str(notify_result)] success |= 'OK' in notify_result or success else: - self._log_debug(u'Detected version >= 12, using JSON API') + self._log_debug('Detected version >= 12, using JSON API') command = '{"jsonrpc":"2.0","method":"GUI.ShowNotification",' \ '"params":{"title":"%s","message":"%s", "image": "%s"},"id":1}' % \ (title.encode('utf-8'), body.encode('utf-8'), self._sg_logo_url) @@ -464,7 +464,7 @@ class XBMCNotifier(Notifier): success |= 'OK' in notify_result or success else: if sickgear.XBMC_ALWAYS_ON or self._testing: - self._log_error(u'Failed to detect version for "%s", check configuration and try again' % cur_host) + self._log_error(f'Failed to detect version for "{cur_host}", check configuration and try again') result += [cur_host + ':No response'] success = False @@ -488,7 +488,7 @@ class XBMCNotifier(Notifier): """ if not sickgear.XBMC_HOST: - self._log_debug(u'No hosts specified, check your settings') + self._log_debug('No hosts specified, check your settings') return False # either update each host, or only attempt to update until one successful result @@ -496,11 +496,11 @@ class XBMCNotifier(Notifier): for host in [x.strip() for x in sickgear.XBMC_HOST.split(',')]: if self._send_update_library(host, show_name): if sickgear.XBMC_UPDATE_ONLYFIRST: - self._log_debug(u'Successfully updated "%s", stopped sending update library commands' % host) + self._log_debug(f'Successfully updated "{host}", stopped sending update library commands') return True else: if sickgear.XBMC_ALWAYS_ON: - self._log_error(u'Failed to detect version for "%s", check configuration and try again' % host) + self._log_error(f'Failed to detect version for "{host}", check configuration and try again') result = result + 1 # needed for the 
'update xbmc' submenu command diff --git a/sickgear/nzbSplitter.py b/sickgear/nzbSplitter.py index 7ac6cfe8..c4334544 100644 --- a/sickgear/nzbSplitter.py +++ b/sickgear/nzbSplitter.py @@ -73,7 +73,7 @@ def _get_season_nzbs(name, url_data, season): try: show_xml = etree.ElementTree(etree.XML(url_data)) except SyntaxError: - logger.log(u'Unable to parse the XML of %s, not splitting it' % name, logger.ERROR) + logger.error(f'Unable to parse the XML of {name}, not splitting it') return {}, '' filename = name.replace('.nzb', '') @@ -86,7 +86,7 @@ def _get_season_nzbs(name, url_data, season): if scene_name_match: show_name, quality_section = scene_name_match.groups() else: - logger.log('%s - Not a valid season pack scene name. If it\'s a valid one, log a bug.' % name, logger.ERROR) + logger.error('%s - Not a valid season pack scene name. If it\'s a valid one, log a bug.' % name) return {}, '' regex = r'(%s[\._]S%02d(?:[E0-9]+)\.[\w\._]+)' % (re.escape(show_name), season) @@ -116,7 +116,7 @@ def _get_season_nzbs(name, url_data, season): if isinstance(ext, string_types) \ and re.search(r'^\.(nzb|r\d{2}|rar|7z|zip|par2|vol\d+|nfo|srt|txt|bat|sh|mkv|mp4|avi|wmv)$', ext, flags=re.I): - logger.log('Unable to split %s into episode nzb\'s' % name, logger.WARNING) + logger.warning('Unable to split %s into episode nzb\'s' % name) return {}, '' if cur_ep not in ep_files: ep_files[cur_ep] = [cur_file] @@ -157,7 +157,7 @@ def _save_nzb(nzb_name, nzb_string): nzb_fh.write(nzb_string) except EnvironmentError as e: - logger.log(u'Unable to save NZB: ' + ex(e), logger.ERROR) + logger.error(f'Unable to save NZB: {ex(e)}') def _strip_ns(element, ns): @@ -178,7 +178,7 @@ def split_result(result): """ resp = helpers.get_url(result.url, failure_monitor=False) if None is resp: - logger.log(u'Unable to load url %s, can\'t download season NZB' % result.url, logger.ERROR) + logger.error(f'Unable to load url {result.url}, can\'t download season NZB') return False # parse the season ep name @@ -186,10 +186,10 @@ def split_result(result): np = NameParser(False, show_obj=result.show_obj) parse_result = np.parse(result.name) except InvalidNameException: - logger.log(u'Unable to parse the filename %s into a valid episode' % result.name, logger.DEBUG) + logger.debug(f'Unable to parse the filename {result.name} into a valid episode') return False except InvalidShowException: - logger.log(u'Unable to parse the filename %s into a valid show' % result.name, logger.DEBUG) + logger.debug(f'Unable to parse the filename {result.name} into a valid show') return False # bust it up @@ -201,35 +201,35 @@ def split_result(result): for new_nzb in separate_nzbs: - logger.log(u'Split out %s from %s' % (new_nzb, result.name), logger.DEBUG) + logger.debug(f'Split out {new_nzb} from {result.name}') # parse the name try: np = NameParser(False, show_obj=result.show_obj) parse_result = np.parse(new_nzb) except InvalidNameException: - logger.log(u"Unable to parse the filename %s into a valid episode" % new_nzb, logger.DEBUG) + logger.debug(f'Unable to parse the filename {new_nzb} into a valid episode') return False except InvalidShowException: - logger.log(u"Unable to parse the filename %s into a valid show" % new_nzb, logger.DEBUG) + logger.debug(f'Unable to parse the filename {new_nzb} into a valid show') return False # make sure the result is sane if (None is not parse_result.season_number and season != parse_result.season_number) \ or (None is parse_result.season_number and 1 != season): - logger.log(u'Found %s inside %s but it doesn\'t seem to 
belong to the same season, ignoring it' - % (new_nzb, result.name), logger.WARNING) + logger.warning(f'Found {new_nzb} inside {result.name} but it doesn\'t seem to belong to the same season,' + f' ignoring it') continue elif 0 == len(parse_result.episode_numbers): - logger.log(u'Found %s inside %s but it doesn\'t seem to be a valid episode NZB, ignoring it' - % (new_nzb, result.name), logger.WARNING) + logger.warning(f'Found {new_nzb} inside {result.name} but it doesn\'t seem to be a valid episode NZB,' + f' ignoring it') continue want_ep = True for ep_no in parse_result.episode_numbers: if not result.show_obj.want_episode(season, ep_no, result.quality): - logger.log(u'Ignoring result %s because we don\'t want an episode that is %s' - % (new_nzb, Quality.qualityStrings[result.quality]), logger.DEBUG) + logger.debug(f'Ignoring result {new_nzb} because we don\'t want an episode that is' + f' {Quality.qualityStrings[result.quality]}') want_ep = False break if not want_ep: diff --git a/sickgear/nzbget.py b/sickgear/nzbget.py index 8d54fad2..703b45bf 100644 --- a/sickgear/nzbget.py +++ b/sickgear/nzbget.py @@ -34,7 +34,7 @@ def test_nzbget(host, use_https, username, password, timeout=300): result = False if not host: msg = 'No NZBGet host found. Please configure it' - logger.log(msg, logger.ERROR) + logger.error(msg) return result, msg, None url = 'http%(scheme)s://%(username)s:%(password)s@%(host)s/xmlrpc' % { @@ -44,24 +44,24 @@ def test_nzbget(host, use_https, username, password, timeout=300): try: msg = 'Success. Connected' if rpc_client.writelog('INFO', 'SickGear connected as a test'): - logger.log(msg, logger.DEBUG) + logger.debug(msg) else: msg += ', but unable to send a message' - logger.log(msg, logger.ERROR) + logger.error(msg) result = True - logger.log(u'NZBGet URL: %s' % url, logger.DEBUG) + logger.debug(f'NZBGet URL: {url}') except moves.http_client.socket.error: msg = 'Please check NZBGet host and port (if it is running). 
NZBGet is not responding to these values' - logger.log(msg, logger.ERROR) + logger.error(msg) except moves.xmlrpc_client.ProtocolError as e: if 'Unauthorized' == e.errmsg: msg = 'NZBGet username or password is incorrect' - logger.log(msg, logger.ERROR) + logger.error(msg) else: msg = 'Protocol Error: %s' % e.errmsg - logger.log(msg, logger.ERROR) + logger.error(msg) return result, msg, rpc_client @@ -114,7 +114,7 @@ def send_nzb(search_result): return result nzbcontent64 = b64encodestring(data, keep_eol=True) - logger.log(u'Sending NZB to NZBGet: %s' % search_result.name) + logger.log(f'Sending NZB to NZBGet: {search_result.name}') try: # Find out if nzbget supports priority (Version 9.0+), old versions beginning with a 0.x will use the old cmd @@ -161,11 +161,11 @@ def send_nzb(search_result): nzbget_prio, False, search_result.url) if nzbget_result: - logger.log(u'NZB sent to NZBGet successfully', logger.DEBUG) + logger.debug('NZB sent to NZBGet successfully') result = True else: - logger.log(u'NZBGet could not add %s.nzb to the queue' % search_result.name, logger.ERROR) + logger.error(f'NZBGet could not add {search_result.name}.nzb to the queue') except (BaseException, Exception): - logger.log(u'Connect Error to NZBGet: could not add %s.nzb to the queue' % search_result.name, logger.ERROR) + logger.error(f'Connect Error to NZBGet: could not add {search_result.name}.nzb to the queue') return result diff --git a/sickgear/postProcessor.py b/sickgear/postProcessor.py index 48501e63..711e0bf6 100644 --- a/sickgear/postProcessor.py +++ b/sickgear/postProcessor.py @@ -111,7 +111,7 @@ class PostProcessor(object): """ logger_msg = re.sub(r'(?i)\.*', '', message) logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) - logger.log(u'%s' % logger_msg, level) + logger.log(f'{logger_msg}', level) self.log += message + '\n' def _check_for_existing_file(self, existing_file): @@ -129,25 +129,24 @@ class PostProcessor(object): """ if not existing_file: - self._log(u'There is no existing file', logger.DEBUG) + self._log('There is no existing file', logger.DEBUG) return PostProcessor.DOESNT_EXIST # if the new file exists, return the appropriate code depending on the size if os.path.isfile(existing_file): - new_file = u'New file %s
.. is ' % self.file_path + new_file = f'New file {self.file_path}
.. is ' if os.path.getsize(self.file_path) == os.path.getsize(existing_file): - self._log(u'%sthe same size as %s' % (new_file, existing_file), logger.DEBUG) + self._log(f'{new_file}the same size as {existing_file}', logger.DEBUG) return PostProcessor.EXISTS_SAME elif os.path.getsize(self.file_path) < os.path.getsize(existing_file): - self._log(u'%ssmaller than %s' % (new_file, existing_file), logger.DEBUG) + self._log(f'{new_file}smaller than {existing_file}', logger.DEBUG) return PostProcessor.EXISTS_LARGER else: - self._log(u'%slarger than %s' % (new_file, existing_file), logger.DEBUG) + self._log(f'{new_file}larger than {existing_file}', logger.DEBUG) return PostProcessor.EXISTS_SMALLER else: - self._log(u'File doesn\'t exist %s' % existing_file, - logger.DEBUG) + self._log(f'File doesn\'t exist {existing_file}', logger.DEBUG) return PostProcessor.DOESNT_EXIST @staticmethod @@ -222,7 +221,7 @@ class PostProcessor(object): file_list = file_list + self.list_associated_files(file_path) if not file_list: - self._log(u'Not deleting anything because there are no files associated with %s' % file_path, logger.DEBUG) + self._log(f'Not deleting anything because there are no files associated with {file_path}', logger.DEBUG) return # delete the file and any other files which we want to delete @@ -234,16 +233,14 @@ class PostProcessor(object): # File is read-only, so make it writeable try: os.chmod(cur_file, stat.S_IWRITE) - self._log(u'Changed read only permissions to writeable to delete file %s' - % cur_file, logger.DEBUG) + self._log(f'Changed read only permissions to writeable to delete file {cur_file}', logger.DEBUG) except (BaseException, Exception): - self._log(u'Cannot change permissions to writeable to delete file: %s' - % cur_file, logger.WARNING) + self._log(f'Cannot change permissions to writeable to delete file: {cur_file}', logger.WARNING) removal_type = helpers.remove_file(cur_file, log_level=logger.DEBUG) if True is not os.path.isfile(cur_file): - self._log(u'%s file %s' % (removal_type, cur_file), logger.DEBUG) + self._log(f'{removal_type} file {cur_file}', logger.DEBUG) # do the library update for synoindex notifiers.NotifierFactory().get('SYNOINDEX').deleteFile(cur_file) @@ -271,7 +268,7 @@ class PostProcessor(object): """ if not action: - self._log(u'Must provide an action for the combined file operation', logger.ERROR) + self._log('Must provide an action for the combined file operation', logger.ERROR) return file_list = [file_path] @@ -281,7 +278,7 @@ class PostProcessor(object): file_list = file_list + self.list_associated_files(file_path, subtitles_only=True) if not file_list: - self._log(u'Not moving anything because there are no files associated with %s' % file_path, logger.DEBUG) + self._log(f'Not moving anything because there are no files associated with {file_path}', logger.DEBUG) return # create base name with file_path (media_file without .extension) @@ -317,7 +314,7 @@ class PostProcessor(object): subs_new_path = os.path.join(new_path, sickgear.SUBTITLES_DIR) dir_exists = helpers.make_dir(subs_new_path) if not dir_exists: - logger.log(u'Unable to create subtitles folder ' + subs_new_path, logger.ERROR) + logger.error(f'Unable to create subtitles folder {subs_new_path}') else: helpers.chmod_as_parent(subs_new_path) new_file_path = os.path.join(subs_new_path, new_file_name) @@ -345,15 +342,16 @@ class PostProcessor(object): :type action_tmpl: """ - def _int_move(cur_file_path, new_file_path, success_tmpl=u' %s to %s'): + def _int_move(cur_file_path, new_file_path, 
success_tmpl=' %s to %s'): try: helpers.move_file(cur_file_path, new_file_path, raise_exceptions=True) helpers.chmod_as_parent(new_file_path) - self._log(u'Moved file from' + (success_tmpl % (cur_file_path, new_file_path)), logger.DEBUG) + self._log(f'Moved file from{(success_tmpl % (cur_file_path, new_file_path))}', + logger.DEBUG) except (IOError, OSError) as e: - self._log(u'Unable to move file %s
.. %s' - % (success_tmpl % (cur_file_path, new_file_path), ex(e)), logger.ERROR) + self._log(f'Unable to move file {success_tmpl % (cur_file_path, new_file_path)}
.. {ex(e)}', + logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, _int_move, @@ -375,15 +373,16 @@ class PostProcessor(object): :type action_tmpl: """ - def _int_copy(cur_file_path, new_file_path, success_tmpl=u' %s to %s'): + def _int_copy(cur_file_path, new_file_path, success_tmpl=' %s to %s'): try: helpers.copy_file(cur_file_path, new_file_path) helpers.chmod_as_parent(new_file_path) - self._log(u'Copied file from' + (success_tmpl % (cur_file_path, new_file_path)), logger.DEBUG) + self._log(f'Copied file from{(success_tmpl % (cur_file_path, new_file_path))}', + logger.DEBUG) except (IOError, OSError) as e: - self._log(u'Unable to copy %s
.. %s' - % (success_tmpl % (cur_file_path, new_file_path), ex(e)), logger.ERROR) + self._log(f'Unable to copy {success_tmpl % (cur_file_path, new_file_path)}
.. {ex(e)}', + logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, _int_copy, @@ -403,15 +402,16 @@ class PostProcessor(object): :type action_tmpl: """ - def _int_hard_link(cur_file_path, new_file_path, success_tmpl=u' %s to %s'): + def _int_hard_link(cur_file_path, new_file_path, success_tmpl=' %s to %s'): try: helpers.hardlink_file(cur_file_path, new_file_path) helpers.chmod_as_parent(new_file_path) - self._log(u'Hard linked file from' + (success_tmpl % (cur_file_path, new_file_path)), logger.DEBUG) + self._log(f'Hard linked file from{(success_tmpl % (cur_file_path, new_file_path))}', + logger.DEBUG) except (IOError, OSError) as e: - self._log(u'Unable to link file %s
.. %s' - % (success_tmpl % (cur_file_path, new_file_path), ex(e)), logger.ERROR) + self._log(f'Unable to link file {success_tmpl % (cur_file_path, new_file_path)}
.. {ex(e)}', + logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, _int_hard_link, @@ -431,16 +431,16 @@ class PostProcessor(object): :type action_tmpl: """ - def _int_move_and_sym_link(cur_file_path, new_file_path, success_tmpl=u' %s to %s'): + def _int_move_and_sym_link(cur_file_path, new_file_path, success_tmpl=' %s to %s'): try: helpers.move_and_symlink_file(cur_file_path, new_file_path) helpers.chmod_as_parent(new_file_path) - self._log(u'Moved then symbolic linked file from' + (success_tmpl % (cur_file_path, new_file_path)), + self._log(f'Moved then symbolic linked file from{(success_tmpl % (cur_file_path, new_file_path))}', logger.DEBUG) except (IOError, OSError) as e: - self._log(u'Unable to link file %s
.. %s' - % (success_tmpl % (cur_file_path, new_file_path), ex(e)), logger.ERROR) + self._log(f'Unable to link file {success_tmpl % (cur_file_path, new_file_path)}
.. {ex(e)}', + logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, _int_move_and_sym_link, @@ -515,9 +515,9 @@ class PostProcessor(object): self.in_history = True to_return = (show_obj, season_number, episode_numbers, quality) if not show_obj: - self._log(u'Unknown show, check availability on ShowList page', logger.DEBUG) + self._log('Unknown show, check availability on ShowList page', logger.DEBUG) break - self._log(u'Found a match in history for %s' % show_obj.name, logger.DEBUG) + self._log(f'Found a match in history for {show_obj.name}', logger.DEBUG) break return to_return @@ -546,7 +546,7 @@ class PostProcessor(object): :rtype: Tuple[None, None, List, None] or Tuple[sickgear.tv.TVShow, int, List[int], int] """ - logger.log(u'Analyzing name ' + repr(name)) + logger.log(f'Analyzing name {repr(name)}') to_return = (None, None, [], None) @@ -556,8 +556,8 @@ class PostProcessor(object): # parse the name to break it into show name, season, and episode np = NameParser(resource, convert=True, show_obj=self.show_obj or show_obj) parse_result = np.parse(name) - self._log(u'Parsed %s
.. from %s' - % (decode_str(str(parse_result), errors='xmlcharrefreplace'), name), logger.DEBUG) + self._log(f'Parsed {decode_str(str(parse_result), errors="xmlcharrefreplace")}
' + f'.. from {name}', logger.DEBUG) if parse_result.is_air_by_date and (None is parse_result.season_number or not parse_result.episode_numbers): season_number = -1 @@ -598,13 +598,16 @@ class PostProcessor(object): self.release_name = helpers.remove_extension(os.path.basename(parse_result.original_name)) else: - logger.log(u'Parse result not sufficient (all following have to be set). will not save release name', - logger.DEBUG) - logger.log(u'Parse result(series_name): ' + str(parse_result.series_name), logger.DEBUG) - logger.log(u'Parse result(season_number): ' + str(parse_result.season_number), logger.DEBUG) - logger.log(u'Parse result(episode_numbers): ' + str(parse_result.episode_numbers), logger.DEBUG) - logger.log(u' or Parse result(air_date): ' + str(parse_result.air_date), logger.DEBUG) - logger.log(u'Parse result(release_group): ' + str(parse_result.release_group), logger.DEBUG) + for cur_msg in ( + 'Parse result not sufficient (all following have to be set). will not save release name', + f'Parse result(series_name): {parse_result.series_name}', + f'Parse result(season_number): {parse_result.season_number}', + f'Parse result(episode_numbers): {parse_result.episode_numbers}', + f' or Parse result(air_date): {parse_result.air_date}', + f'Parse result(release_group): {parse_result.release_group}' + ): + logger.debug(cur_msg) + def _find_info(self, history_only=False): """ @@ -632,7 +635,7 @@ class PostProcessor(object): lambda: self._analyze_name(self.file_path), # try to analyze the dir + file name together as one name - lambda: self._analyze_name(self.folder_name + u' ' + self.file_name), + lambda: self._analyze_name(f'{self.folder_name} {self.file_name}'), # try to analyze file name with previously parsed show_obj lambda: self._analyze_name(self.file_name, show_obj=show_obj, rel_grp=rel_grp)], @@ -645,7 +648,7 @@ class PostProcessor(object): try: (try_show_obj, try_season, try_episodes, try_quality) = cur_try() except (InvalidNameException, InvalidShowException) as e: - logger.log(u'Unable to parse, skipping: ' + ex(e), logger.DEBUG) + logger.debug(f'Unable to parse, skipping: {ex(e)}') continue if not try_show_obj: @@ -667,8 +670,8 @@ class PostProcessor(object): # for air-by-date shows we need to look up the season/episode from database if -1 == season_number and show_obj and episode_numbers: - self._log(u'Looks like this is an air-by-date or sports show,' - u' attempting to convert the date to season/episode', logger.DEBUG) + self._log('Looks like this is an air-by-date or sports show,' + ' attempting to convert the date to season/episode', logger.DEBUG) airdate = episode_numbers[0].toordinal() my_db = db.DBConnection() sql_result = my_db.select( @@ -681,8 +684,8 @@ class PostProcessor(object): season_number = int(sql_result[0][0]) episode_numbers = [int(sql_result[0][1])] else: - self._log(u'Unable to find episode with date %s for show %s, skipping' % - (episode_numbers[0], show_obj.tvid_prodid), logger.DEBUG) + self._log(f'Unable to find episode with date {episode_numbers[0]} for show {show_obj.tvid_prodid},' + f' skipping', logger.DEBUG) # don't leave dates in the episode list if we can't convert them to real episode numbers episode_numbers = [] continue @@ -697,8 +700,8 @@ class PostProcessor(object): [show_obj.tvid, show_obj.prodid]) if 1 == int(num_seasons_sql_result[0][0]) and None is season_number: self._log( - u'No season number found, but this show appears to only have 1 season,' - u' setting season number to 1...', logger.DEBUG) + 'No season number found, but this 
show appears to only have 1 season,' + ' setting season number to 1...', logger.DEBUG) season_number = 1 if show_obj and season_number and episode_numbers: @@ -731,13 +734,13 @@ class PostProcessor(object): for cur_episode_number in episode_numbers: cur_episode_number = int(cur_episode_number) - self._log(u'Retrieving episode object for %sx%s' % (season_number, cur_episode_number), logger.DEBUG) + self._log(f'Retrieving episode object for {season_number}x{cur_episode_number}', logger.DEBUG) # now that we've figured out which episode this file is just load it manually try: ep_obj = show_obj.get_episode(season_number, cur_episode_number) except exceptions_helper.EpisodeNotFoundException as e: - self._log(u'Unable to create episode: ' + ex(e), logger.DEBUG) + self._log(f'Unable to create episode: {ex(e)}', logger.DEBUG) raise exceptions_helper.PostProcessingFailed() # associate all the episodes together under a single root episode @@ -764,9 +767,8 @@ class PostProcessor(object): if ep_obj.status in common.Quality.SNATCHED_ANY: old_status, ep_quality = common.Quality.split_composite_status(ep_obj.status) if common.Quality.UNKNOWN != ep_quality: - self._log( - u'Using "%s" quality from the old status' % common.Quality.qualityStrings[ep_quality], - logger.DEBUG) + self._log(f'Using "{common.Quality.qualityStrings[ep_quality]}" quality from the old status', + logger.DEBUG) return ep_quality # search all possible names for our new quality, in case the file or dir doesn't have it @@ -780,26 +782,25 @@ class PostProcessor(object): continue ep_quality = common.Quality.name_quality(cur_name, ep_obj.show_obj.is_anime) - quality_log = u' "%s" quality parsed from the %s %s'\ - % (common.Quality.qualityStrings[ep_quality], thing, cur_name) + quality_log = f' "{common.Quality.qualityStrings[ep_quality]}" quality parsed from the {thing} {cur_name}' # if we find a good one then use it if common.Quality.UNKNOWN != ep_quality: - self._log(u'Using' + quality_log, logger.DEBUG) + self._log(f'Using{quality_log}', logger.DEBUG) return ep_quality else: - self._log(u'Found' + quality_log, logger.DEBUG) + self._log(f'Found{quality_log}', logger.DEBUG) ep_quality = common.Quality.file_quality(self.file_path) if common.Quality.UNKNOWN != ep_quality: - self._log(u'Using "%s" quality parsed from the metadata file content of %s' - % (common.Quality.qualityStrings[ep_quality], self.file_name), logger.DEBUG) + self._log(f'Using "{common.Quality.qualityStrings[ep_quality]}" quality parsed' + f' from the metadata file content of {self.file_name}', logger.DEBUG) return ep_quality # Try guessing quality from the file name ep_quality = common.Quality.assume_quality(self.file_name) - self._log(u'Using guessed "%s" quality from the file name %s' - % (common.Quality.qualityStrings[ep_quality], self.file_name), logger.DEBUG) + self._log(f'Using guessed "{common.Quality.qualityStrings[ep_quality]}" quality' + f' from the file name {self.file_name}', logger.DEBUG) return ep_quality @@ -822,7 +823,7 @@ class PostProcessor(object): try: script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", script_name) if piece.strip()] script_cmd[0] = os.path.abspath(script_cmd[0]) - self._log(u'Absolute path to script: ' + script_cmd[0], logger.DEBUG) + self._log(f'Absolute path to script: {script_cmd[0]}', logger.DEBUG) script_cmd += [ep_obj.location, self.file_path] @@ -832,7 +833,7 @@ class PostProcessor(object): str(ep_obj.episode), str(ep_obj.airdate)] - self._log(u'Executing command ' + str(script_cmd)) + self._log(f'Executing 
command {script_cmd}') except (BaseException, Exception) as e: self._log('Error creating extra script command: %s' % ex(e), logger.ERROR) return @@ -843,10 +844,10 @@ class PostProcessor(object): self._log('Script result: %s' % output, logger.DEBUG) except OSError as e: - self._log(u'Unable to run extra_script: ' + ex(e), logger.ERROR) + self._log(f'Unable to run extra_script: {ex(e)}', logger.ERROR) except (BaseException, Exception) as e: - self._log(u'Unable to run extra_script: ' + ex(e), logger.ERROR) + self._log(f'Unable to run extra_script: {ex(e)}', logger.ERROR) def _run_extra_scripts(self, ep_obj): """ @@ -881,48 +882,48 @@ class PostProcessor(object): if not existing_show_path and not sickgear.CREATE_MISSING_SHOW_DIRS: # Show location does not exist, and cannot be created, marking it unsafe to proceed - self._log(u'.. marking it unsafe to proceed because show location does not exist', logger.DEBUG) + self._log('.. marking it unsafe to proceed because show location does not exist', logger.DEBUG) return False # if SickGear snatched this then assume it's safe if ep_obj.status in common.Quality.SNATCHED_ANY: - self._log(u'SickGear snatched this episode, marking it safe to replace', logger.DEBUG) + self._log('SickGear snatched this episode, marking it safe to replace', logger.DEBUG) return True old_ep_status, old_ep_quality = common.Quality.split_composite_status(ep_obj.status) # if old episode is not downloaded/archived then it's safe if common.DOWNLOADED != old_ep_status and common.ARCHIVED != old_ep_status: - self._log(u'Existing episode status is not downloaded/archived, marking it safe to replace', logger.DEBUG) + self._log('Existing episode status is not downloaded/archived, marking it safe to replace', logger.DEBUG) return True if common.ARCHIVED == old_ep_status and common.Quality.NONE == old_ep_quality: - self._log(u'Marking it unsafe to replace because the existing episode status is archived', logger.DEBUG) + self._log('Marking it unsafe to replace because the existing episode status is archived', logger.DEBUG) return False # Status downloaded. Quality/ size checks # if manual post process option is set to force_replace then it's safe if self.force_replace: - self._log(u'Force replace existing episode option is enabled, marking it safe to replace', logger.DEBUG) + self._log('Force replace existing episode option is enabled, marking it safe to replace', logger.DEBUG) return True # if the file processed is higher quality than the existing episode then it's safe if new_ep_quality > old_ep_quality: if common.Quality.UNKNOWN != new_ep_quality: - self._log(u'Existing episode status is not snatched but the episode to process appears to be better' - u' quality than existing episode, marking it safe to replace', logger.DEBUG) + self._log('Existing episode status is not snatched but the episode to process appears to be better' + ' quality than existing episode, marking it safe to replace', logger.DEBUG) return True else: - self._log(u'Marking it unsafe to replace because an existing episode exists in the database and' - u' the episode to process has unknown quality', logger.DEBUG) + self._log('Marking it unsafe to replace because an existing episode exists in the database and' + ' the episode to process has unknown quality', logger.DEBUG) return False existing_file_status = self._check_for_existing_file(ep_obj.location) if PostProcessor.DOESNT_EXIST == existing_file_status \ and (existing_show_path or sickgear.CREATE_MISSING_SHOW_DIRS): - self._log(u'.. 
there is no file to replace, marking it safe to continue', logger.DEBUG) + self._log('.. there is no file to replace, marking it safe to continue', logger.DEBUG) return True # if there's an existing downloaded file with same quality, check filesize to decide @@ -946,48 +947,47 @@ class PostProcessor(object): npr.is_anime, check_is_repack=True) if new_proper_level > cur_proper_level and \ (not is_repack or npr.release_group == ep_obj.release_group): - self._log(u'Proper or repack with same quality, marking it safe to replace', logger.DEBUG) + self._log('Proper or repack with same quality, marking it safe to replace', logger.DEBUG) return True - self._log(u'An episode exists in the database with the same quality as the episode to process', - logger.DEBUG) + self._log('An episode exists in the database with the same quality as the episode to process', logger.DEBUG) - self._log(u'Checking size of existing file ' + ep_obj.location, logger.DEBUG) + self._log(f'Checking size of existing file {ep_obj.location}', logger.DEBUG) if PostProcessor.EXISTS_SMALLER == existing_file_status: # File exists and new file is larger, marking it safe to replace - self._log(u'.. the existing smaller file will be replaced', logger.DEBUG) + self._log('.. the existing smaller file will be replaced', logger.DEBUG) return True elif PostProcessor.EXISTS_LARGER == existing_file_status: # File exists and new file is smaller, marking it unsafe to replace - self._log(u'.. marking it unsafe to replace the existing larger file', logger.DEBUG) + self._log('.. marking it unsafe to replace the existing larger file', logger.DEBUG) return False elif PostProcessor.EXISTS_SAME == existing_file_status: # File exists and new file is same size, marking it unsafe to replace - self._log(u'.. marking it unsafe to replace the existing same size file', logger.DEBUG) + self._log('.. marking it unsafe to replace the existing same size file', logger.DEBUG) return False else: - self._log(u'Unknown file status for: %s This should never happen, please log this as a bug.' 
- % ep_obj.location, logger.ERROR) + self._log(f'Unknown file status for: {ep_obj.location}' + f' This should never happen, please log this as a bug.', logger.ERROR) return False # if there's an existing file with better quality if old_ep_quality > new_ep_quality and old_ep_quality != common.Quality.UNKNOWN: # Episode already exists in database and processed episode has lower quality, marking it unsafe to replace - self._log(u'Marking it unsafe to replace the episode that already exists in database with a file of lower' - u' quality', logger.DEBUG) + self._log('Marking it unsafe to replace the episode that already exists in database with a file of lower' + ' quality', logger.DEBUG) return False if self.in_history: - self._log(u'SickGear snatched this episode, marking it safe to replace', logger.DEBUG) + self._log('SickGear snatched this episode, marking it safe to replace', logger.DEBUG) return True # None of the conditions were met, marking it unsafe to replace - self._log(u'Marking it unsafe to replace because no positive condition is met, you may force replace but it' - u' would be better to examine the files', logger.DEBUG) + self._log('Marking it unsafe to replace because no positive condition is met, you may force replace but it' + ' would be better to examine the files', logger.DEBUG) return False def _change_ep_objs(self, show_obj, season_number, episode_numbers, quality): @@ -998,7 +998,7 @@ class PostProcessor(object): for cur_ep_obj in [ep_obj] + ep_obj.related_ep_obj: with cur_ep_obj.lock: if self.release_name: - self._log(u'Found release name ' + self.release_name, logger.DEBUG) + self._log(f'Found release name {self.release_name}', logger.DEBUG) cur_ep_obj.release_name = self.release_name or '' @@ -1044,7 +1044,7 @@ class PostProcessor(object): self._log('Successfully processed.', logger.MESSAGE) else: - self._log('Can\'t figure out what show/episode to process', logger.WARNING) + self._log("Can't figure out what show/episode to process", logger.WARNING) raise exceptions_helper.PostProcessingFailed() def process(self): @@ -1054,16 +1054,16 @@ class PostProcessor(object): :rtype: bool """ - self._log(u'Processing... %s%s' % (os.path.relpath(self.file_path, self.folder_path), - (u'
.. from nzb %s' % self.nzb_name, u'')[None is self.nzb_name])) + self._log(f'Processing... {os.path.relpath(self.file_path, self.folder_path)}' + f'{(f"
.. from nzb {self.nzb_name}", "")[None is self.nzb_name]}') if os.path.isdir(self.file_path): - self._log(u'Expecting file %s
.. is actually a directory, skipping' % self.file_path) + self._log(f'Expecting file {self.file_path}
.. is actually a directory, skipping') return False for ignore_file in self.IGNORED_FILESTRINGS: if ignore_file in self.file_path: - self._log(u'File %s
.. is ignored type, skipping' % self.file_path) + self._log(f'File {self.file_path}
.. is ignored type, skipping') return False # reset per-file stuff @@ -1075,10 +1075,10 @@ class PostProcessor(object): # if we don't have it then give up if not show_obj: - self._log(u'Must add show to SickGear before trying to post process an episode', logger.WARNING) + self._log('Must add show to SickGear before trying to post process an episode', logger.WARNING) raise exceptions_helper.PostProcessingFailed() elif None is season_number or not episode_numbers: - self._log(u'Quitting this post process, could not determine what episode this is', logger.DEBUG) + self._log('Quitting this post process, could not determine what episode this is', logger.DEBUG) return False # retrieve/create the corresponding TVEpisode objects @@ -1089,12 +1089,12 @@ class PostProcessor(object): new_ep_quality = self._get_quality(ep_obj) else: new_ep_quality = quality - self._log(u'Using "%s" quality' % common.Quality.qualityStrings[new_ep_quality], logger.DEBUG) + self._log(f'Using "{common.Quality.qualityStrings[new_ep_quality]}" quality', logger.DEBUG) # see if it's safe to replace existing episode (is download snatched, PROPER, better quality) if not self._safe_replace(ep_obj, new_ep_quality): # if it's not safe to replace, stop here - self._log(u'Quitting this post process', logger.DEBUG) + self._log('Quitting this post process', logger.DEBUG) return False # delete the existing file (and company) @@ -1107,7 +1107,7 @@ class PostProcessor(object): helpers.delete_empty_folders(os.path.dirname(cur_ep_obj.location), keep_dir=ep_obj.show_obj.location) except (OSError, IOError): - raise exceptions_helper.PostProcessingFailed(u'Unable to delete existing files') + raise exceptions_helper.PostProcessingFailed('Unable to delete existing files') # set the status of the episodes # for cur_ep_obj in [ep_obj] + ep_obj.related_ep_obj: @@ -1115,14 +1115,14 @@ class PostProcessor(object): # if the show directory doesn't exist then make it if allowed if not os.path.isdir(ep_obj.show_obj.location) and sickgear.CREATE_MISSING_SHOW_DIRS: - self._log(u'Show directory does not exist, creating it', logger.DEBUG) + self._log('Show directory does not exist, creating it', logger.DEBUG) try: os.mkdir(ep_obj.show_obj.location) # do the library update for synoindex notifiers.NotifierFactory().get('SYNOINDEX').addFolder(ep_obj.show_obj.location) except (OSError, IOError): - raise exceptions_helper.PostProcessingFailed(u'Unable to create show directory: ' - + ep_obj.show_obj.location) + raise exceptions_helper.PostProcessingFailed(f'Unable to create show directory:' + f' {ep_obj.show_obj.location}') # get metadata for the show (but not episode because it hasn't been fully processed) ep_obj.show_obj.write_metadata(True) @@ -1132,7 +1132,7 @@ class PostProcessor(object): # Just want to keep this consistent for failed handling right now release_name = show_name_helpers.determine_release_name(self.folder_path, self.nzb_name) if None is release_name: - self._log(u'No snatched release found in history', logger.WARNING) + self._log('No snatched release found in history', logger.WARNING) elif sickgear.USE_FAILED_DOWNLOADS: failed_history.remove_failed(release_name) @@ -1144,13 +1144,13 @@ class PostProcessor(object): except exceptions_helper.ShowDirNotFoundException: raise exceptions_helper.PostProcessingFailed( - u'Unable to post process an episode because the show dir does not exist, quitting') + 'Unable to post process an episode because the show dir does not exist, quitting') - self._log(u'Destination folder for this episode is ' + dest_path, 
logger.DEBUG) + self._log(f'Destination folder for this episode is {dest_path}', logger.DEBUG) # create any folders we need if not helpers.make_path(dest_path, syno=True): - raise exceptions_helper.PostProcessingFailed(u'Unable to create destination folder: ' + dest_path) + raise exceptions_helper.PostProcessingFailed(f'Unable to create destination folder: {dest_path}') # figure out the base name of the resulting episode file if sickgear.RENAME_EPISODES: @@ -1174,7 +1174,7 @@ class PostProcessor(object): while not stop_event.is_set(): stop_event.wait(60) webh('.') - webh(u'\n') + webh('\n') keepalive_stop = threading.Event() keepalive = threading.Thread(target=keep_alive, args=(self.webhandler, keepalive_stop)) @@ -1185,7 +1185,7 @@ class PostProcessor(object): 'new_base_name': new_base_name, 'associated_files': sickgear.MOVE_ASSOCIATED_FILES} args_cpmv = {'subtitles': sickgear.USE_SUBTITLES and ep_obj.show_obj.subtitles, - 'action_tmpl': u' %s
.. to %s'} + 'action_tmpl': ' %s
.. to %s'} args_cpmv.update(args_link) if self.webhandler: self.webhandler('Processing method is "%s"' % self.process_method) @@ -1199,10 +1199,10 @@ class PostProcessor(object): elif 'symlink' == self.process_method: self._move_and_symlink(**args_link) else: - logger.log(u'Unknown process method: ' + str(self.process_method), logger.ERROR) - raise exceptions_helper.PostProcessingFailed(u'Unable to move the files to the new location') + logger.error(f'Unknown process method: {self.process_method}') + raise exceptions_helper.PostProcessingFailed('Unable to move the files to the new location') except (OSError, IOError): - raise exceptions_helper.PostProcessingFailed(u'Unable to move the files to the new location') + raise exceptions_helper.PostProcessingFailed('Unable to move the files to the new location') finally: if self.webhandler: # stop the keep_alive diff --git a/sickgear/processTV.py b/sickgear/processTV.py index 78fff9fd..f6a0a5bb 100644 --- a/sickgear/processTV.py +++ b/sickgear/processTV.py @@ -70,7 +70,7 @@ class ProcessTVShow(object): @property def result(self, pre=True): # type: (bool) -> AnyStr - return (('
', u'\n')[pre]).join(self._output) + return (('
', '\n')[pre]).join(self._output) def _buffer(self, text=None): if None is not text: @@ -78,7 +78,7 @@ class ProcessTVShow(object): if self.webhandler: logger_msg = re.sub(r'(?i)', '\n', text) logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) - self.webhandler('%s%s' % (logger_msg, u'\n')) + self.webhandler('%s%s' % (logger_msg, '\n')) def _log_helper(self, message, log_level=logger.DEBUG): """ @@ -90,7 +90,7 @@ class ProcessTVShow(object): """ logger_msg = re.sub(r'(?i)\.*', '', message) logger_msg = re.sub('(?i)]+>([^<]+)', r'\1', logger_msg) - logger.log(u'%s' % logger_msg, log_level) + logger.log(f'{logger_msg}', log_level) self._buffer(message) return @@ -136,14 +136,14 @@ class ProcessTVShow(object): try: shutil.rmtree(folder) except (OSError, IOError) as e: - logger.log(u'Warning: unable to delete folder: %s: %s' % (folder, ex(e)), logger.WARNING) + logger.warning(f'Warning: unable to delete folder: {folder}: {ex(e)}') return False if os.path.isdir(folder): - logger.log(u'Warning: unable to delete folder: %s' % folder, logger.WARNING) + logger.warning(f'Warning: unable to delete folder: {folder}') return False - self._log_helper(u'Deleted folder ' + folder, logger.MESSAGE) + self._log_helper(f'Deleted folder {folder}', logger.MESSAGE) return True def _delete_files(self, process_path, notwanted_files, force=False): @@ -170,18 +170,18 @@ class ProcessTVShow(object): file_attribute = os.stat(cur_file_path)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable - self._log_helper(u'Changing ReadOnly flag for file ' + cur_file) + self._log_helper(f'Changing ReadOnly flag for file {cur_file}') try: os.chmod(cur_file_path, stat.S_IWRITE) except OSError as e: - self._log_helper(u'Cannot change permissions of %s: %s' % (cur_file_path, ex(e))) + self._log_helper(f'Cannot change permissions of {cur_file_path}: {ex(e)}') removal_type = helpers.remove_file(cur_file_path) if os.path.isfile(cur_file_path): result = False else: - self._log_helper(u'%s file %s' % (removal_type, cur_file)) + self._log_helper(f'{removal_type} file {cur_file}') return result @@ -209,7 +209,7 @@ class ProcessTVShow(object): show_obj = helpers.find_show_by_id({int(sql_result[-1]['indexer']): int(sql_result[-1]['showid'])}, check_multishow=True) if hasattr(show_obj, 'name'): - logger.log('Found Show: %s in snatch history for: %s' % (show_obj.name, name), logger.DEBUG) + logger.debug('Found Show: %s in snatch history for: %s' % (show_obj.name, name)) except MultipleShowObjectsException: show_obj = None return show_obj @@ -319,19 +319,19 @@ class ProcessTVShow(object): elif dir_name and sickgear.TV_DOWNLOAD_DIR and os.path.isdir(sickgear.TV_DOWNLOAD_DIR)\ and os.path.normpath(dir_name) != os.path.normpath(sickgear.TV_DOWNLOAD_DIR): dir_name = os.path.join(sickgear.TV_DOWNLOAD_DIR, os.path.abspath(dir_name).split(os.path.sep)[-1]) - self._log_helper(u'SickGear PP Config, completed TV downloads folder: ' + sickgear.TV_DOWNLOAD_DIR) + self._log_helper(f'SickGear PP Config, completed TV downloads folder: {sickgear.TV_DOWNLOAD_DIR}') if dir_name: - self._log_helper(u'Checking folder... ' + dir_name) + self._log_helper(f'Checking folder... {dir_name}') # if we didn't find a real directory then process "failed" or just quit if not dir_name or not os.path.isdir(dir_name): if nzb_name and failed: self._process_failed(dir_name, nzb_name, show_obj=show_obj) else: - self._log_helper(u'Unable to figure out what folder to process. 
' + - u'If your downloader and SickGear aren\'t on the same PC then make sure ' + - u'you fill out your completed TV download folder in the PP config.') + self._log_helper('Unable to figure out what folder to process. ' + 'If your downloader and SickGear aren\'t on the same PC then make sure ' + 'you fill out your completed TV download folder in the PP config.') return self.result parent = self.find_parent(dir_name) @@ -352,13 +352,13 @@ class ProcessTVShow(object): path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type) if sickgear.POSTPONE_IF_SYNC_FILES and any(filter(helpers.is_sync_file, files)): - self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) + self._log_helper('Found temporary sync files, skipping post process', logger.ERROR) return self.result if not process_method: process_method = sickgear.PROCESS_METHOD - self._log_helper(u'Processing folder... %s' % path) + self._log_helper(f'Processing folder... {path}') work_files = [] joined = self.join(path) @@ -380,13 +380,13 @@ class ProcessTVShow(object): work_files += [os.path.join(path, item) for item in rar_content] if 0 < len(files): - self._log_helper(u'Process file%s: %s' % (helpers.maybe_plural(files), str(files))) + self._log_helper(f'Process file{helpers.maybe_plural(files)}: {str(files)}') if 0 < len(video_files): - self._log_helper(u'Process video file%s: %s' % (helpers.maybe_plural(video_files), str(video_files))) + self._log_helper(f'Process video file{helpers.maybe_plural(video_files)}: {str(video_files)}') if 0 < len(rar_content): - self._log_helper(u'Process rar content: ' + str(rar_content)) + self._log_helper(f'Process rar content: {rar_content}') if 0 < len(video_in_rar): - self._log_helper(u'Process video%s in rar: %s' % (helpers.maybe_plural(video_in_rar), str(video_in_rar))) + self._log_helper(f'Process video{helpers.maybe_plural(video_in_rar)} in rar: {str(video_in_rar)}') # If nzb_name is set and there's more than one videofile in the folder, files will be lost (overwritten). 
nzb_name_original = nzb_name @@ -425,8 +425,7 @@ class ProcessTVShow(object): force, force_replace, use_trash=cleanup, show_obj=show_obj) except OSError as e: - logger.log('Batch skipped, %s%s' % - (ex(e), e.filename and (' (file %s)' % e.filename) or ''), logger.WARNING) + logger.warning('Batch skipped, %s%s' % (ex(e), e.filename and (' (file %s)' % e.filename) or '')) # Process video files in TV subdirectories for directory in [x for x in dirs if self._validate_dir( @@ -438,7 +437,7 @@ class ProcessTVShow(object): for walk_path, walk_dir, files in os.walk(os.path.join(path, directory), topdown=False): if sickgear.POSTPONE_IF_SYNC_FILES and any(filter(helpers.is_sync_file, files)): - self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) + self._log_helper('Found temporary sync files, skipping post process', logger.ERROR) return self.result parent = self.find_parent(walk_path) @@ -493,8 +492,7 @@ class ProcessTVShow(object): self.check_video_filenames(walk_dir, video_pick))) except OSError as e: - logger.log('Batch skipped, %s%s' % - (ex(e), e.filename and (' (file %s)' % e.filename) or ''), logger.WARNING) + logger.warning(f'Batch skipped, {ex(e)}{e.filename and (" (file %s)" % e.filename) or ""}') if process_method in ('hardlink', 'symlink') and video_in_rar: self._delete_files(walk_path, rar_content) @@ -526,12 +524,13 @@ class ProcessTVShow(object): if self.any_vid_processed: if not self.files_failed: - _bottom_line(u'Successfully processed.', logger.MESSAGE) + _bottom_line('Successfully processed.', logger.MESSAGE) else: - _bottom_line(u'Successfully processed at least one video file%s.' % - (', others were skipped', ' and skipped another')[1 == self.files_failed], logger.MESSAGE) + _bottom_line(f'Successfully processed at least one video file' + f'{(", others were skipped", " and skipped another")[1 == self.files_failed]}.', + logger.MESSAGE) else: - _bottom_line(u'Failed! Did not process any files.', logger.WARNING) + _bottom_line('Failed! 
Did not process any files.', logger.WARNING) return self.result @@ -599,16 +598,16 @@ class ProcessTVShow(object): :return: success :rtype: bool """ - self._log_helper(u'Processing sub dir: ' + dir_name) + self._log_helper(f'Processing sub dir: {dir_name}') if os.path.basename(dir_name).startswith('_FAILED_'): - self._log_helper(u'The directory name indicates it failed to extract.') + self._log_helper('The directory name indicates it failed to extract.') failed = True elif os.path.basename(dir_name).startswith('_UNDERSIZED_'): - self._log_helper(u'The directory name indicates that it was previously rejected for being undersized.') + self._log_helper('The directory name indicates that it was previously rejected for being undersized.') failed = True elif os.path.basename(dir_name).upper().startswith('_UNPACK'): - self._log_helper(u'The directory name indicates that this release is in the process of being unpacked.') + self._log_helper('The directory name indicates that this release is in the process of being unpacked.') return False if failed: @@ -616,7 +615,7 @@ class ProcessTVShow(object): return False if helpers.is_hidden_folder(dir_name): - self._log_helper(u'Ignoring hidden folder: ' + dir_name) + self._log_helper(f'Ignoring hidden folder: {dir_name}') return False # make sure the directory isn't inside a show directory @@ -626,9 +625,7 @@ class ProcessTVShow(object): for cur_result in sql_result: if dir_name.lower().startswith(os.path.realpath(cur_result['location']).lower() + os.sep) \ or dir_name.lower() == os.path.realpath(cur_result['location']).lower(): - self._log_helper( - u'Found an episode that has already been moved to its show dir, skipping', - logger.ERROR) + self._log_helper('Found an episode that has already been moved to its show dir, skipping', logger.ERROR) return False # Get the videofile list for the next checks @@ -686,16 +683,16 @@ class ProcessTVShow(object): if sickgear.UNPACK and rar_files: - self._log_helper(u'Packed releases detected: ' + str(rar_files)) + self._log_helper(f'Packed releases detected: {rar_files}') for archive in rar_files: - self._log_helper(u'Unpacking archive: ' + archive) + self._log_helper(f'Unpacking archive: {archive}') try: rar_handle = rarfile.RarFile(os.path.join(path, archive)) except (BaseException, Exception): - self._log_helper(u'Failed to open archive: %s' % archive, logger.ERROR) + self._log_helper(f'Failed to open archive: {archive}', logger.ERROR) self._set_process_success(False) continue try: @@ -704,8 +701,7 @@ class ProcessTVShow(object): for file_in_archive in [os.path.basename(x.filename) for x in rar_handle.infolist() if not x.is_dir()]: if self._already_postprocessed(path, file_in_archive, force): - self._log_helper( - u'Archive file already processed, extraction skipped: ' + file_in_archive) + self._log_helper(f'Archive file already processed, extraction skipped: {file_in_archive}') skip_file = True break @@ -719,14 +715,14 @@ class ProcessTVShow(object): renamed = self.cleanup_names(path, rar_content) cur_unpacked = rar_content if not renamed else \ (list(set(rar_content) - set(iterkeys(renamed))) + list(renamed.values())) - self._log_helper(u'Unpacked content: [u\'%s\']' % '\', u\''.join(map(text_type, cur_unpacked))) + self._log_helper('Unpacked content: ["%s"]' % '", "'.join(map(text_type, cur_unpacked))) unpacked_files += cur_unpacked except (rarfile.PasswordRequired, rarfile.RarWrongPassword): - self._log_helper(u'Failed to unpack archive PasswordRequired: %s' % archive, logger.ERROR) + self._log_helper(f'Failed 
to unpack archive PasswordRequired: {archive}', logger.ERROR) self._set_process_success(False) self.fail_detected = True except (BaseException, Exception): - self._log_helper(u'Failed to unpack archive: %s' % archive, logger.ERROR) + self._log_helper(f'Failed to unpack archive: {archive}', logger.ERROR) self._set_process_success(False) finally: rar_handle.close() @@ -738,11 +734,11 @@ class ProcessTVShow(object): try: rar_handle = rarfile.RarFile(os.path.join(path, archive)) except (BaseException, Exception): - self._log_helper(u'Failed to open archive: %s' % archive, logger.ERROR) + self._log_helper(f'Failed to open archive: {archive}', logger.ERROR) continue try: if rar_handle.needs_password(): - self._log_helper(u'Failed to unpack archive PasswordRequired: %s' % archive, logger.ERROR) + self._log_helper(f'Failed to unpack archive PasswordRequired: {archive}', logger.ERROR) self._set_process_success(False) self.failure_detected = True rar_handle.close() @@ -813,7 +809,7 @@ class ProcessTVShow(object): is_renamed[os.path.relpath(file_path, directory)] = \ os.path.relpath(new_filename + file_extension, directory) except OSError as _e: - logger.log('Error unable to rename file "%s" because %s' % (cur_filename, ex(_e)), logger.ERROR) + logger.error('Error unable to rename file "%s" because %s' % (cur_filename, ex(_e))) elif helpers.has_media_ext(cur_filename) and \ None is not garbage_name.search(file_name) and None is not media_pattern.search(base_name): _num_videos += 1 @@ -836,7 +832,7 @@ class ProcessTVShow(object): os.rename(old_name, new_name) is_renamed[os.path.relpath(old_name, directory)] = os.path.relpath(new_name, directory) except OSError as e: - logger.log('Error unable to rename file "%s" because %s' % (old_name, ex(e)), logger.ERROR) + logger.error('Error unable to rename file "%s" because %s' % (old_name, ex(e))) return is_renamed @@ -876,7 +872,7 @@ class ProcessTVShow(object): try: os.rename(base_filepath, outfile) except OSError: - logger.log('Error unable to rename file %s' % base_filepath, logger.ERROR) + logger.error('Error unable to rename file %s' % base_filepath) return result chunk_set.append(outfile) chunk_set.sort() @@ -957,8 +953,8 @@ class ProcessTVShow(object): my_db = db.DBConnection() sql_result = my_db.select('SELECT * FROM tv_episodes WHERE release_name = ?', [dir_name]) if sql_result: - self._log_helper(u'Found a release directory %s that has already been processed,
.. skipping: %s' - % (showlink, dir_name)) + self._log_helper(f'Found a release directory {showlink} that has already been processed,
' + f'.. skipping: {dir_name}') if ep_detail_sql: reset_status(parse_result.show_obj.tvid, parse_result.show_obj.prodid, @@ -972,8 +968,8 @@ class ProcessTVShow(object): sql_result = my_db.select( 'SELECT * FROM tv_episodes WHERE release_name = ?', [videofile.rpartition('.')[0]]) if sql_result: - self._log_helper(u'Found a video, but that release %s was already processed,
.. skipping: %s' - % (showlink, videofile)) + self._log_helper(f'Found a video, but that release {showlink} was already processed,
' + f'.. skipping: {videofile}') if ep_detail_sql: reset_status(parse_result.show_obj.tvid, parse_result.show_obj.prodid, @@ -991,10 +987,10 @@ class ProcessTVShow(object): + ' and tv_episodes.status IN (%s)' % ','.join([str(x) for x in common.Quality.DOWNLOADED])\ + ' and history.resource LIKE ?' - sql_result = my_db.select(search_sql, [u'%' + videofile]) + sql_result = my_db.select(search_sql, [f'%{videofile}']) if sql_result: - self._log_helper(u'Found a video, but the episode %s is already processed,
.. skipping: %s' - % (showlink, videofile)) + self._log_helper(f'Found a video, but the episode {showlink} is already processed,
' + f'.. skipping: {videofile}') if ep_detail_sql: reset_status(parse_result.show_obj.tvid, parse_result.show_obj.prodid, @@ -1051,7 +1047,7 @@ class ProcessTVShow(object): process_fail_message = '' except exceptions_helper.PostProcessingFailed: file_success = False - process_fail_message = '
.. Post Processing Failed' + process_fail_message = '
.. Post Processing Failed' self._set_process_success(file_success) @@ -1059,13 +1055,11 @@ class ProcessTVShow(object): self._buffer(processor.log.strip('\n')) if file_success: - self._log_helper(u'Successfully processed ' + cur_video_file, logger.MESSAGE) + self._log_helper(f'Successfully processed {cur_video_file}', logger.MESSAGE) elif self.any_vid_processed: - self._log_helper(u'Warning fail for %s%s' % (cur_video_file_path, process_fail_message), - logger.WARNING) + self._log_helper(f'Warning fail for {cur_video_file_path}{process_fail_message}', logger.WARNING) else: - self._log_helper(u'Did not use file %s%s' % (cur_video_file_path, process_fail_message), - logger.WARNING) + self._log_helper(f'Did not use file {cur_video_file_path}{process_fail_message}', logger.WARNING) @staticmethod def _get_path_dir_files(dir_name, nzb_name, pp_type): @@ -1131,13 +1125,12 @@ class ProcessTVShow(object): if sickgear.DELETE_FAILED and self.any_vid_processed: self._delete_folder(dir_name, check_empty=False) - task = u'Failed download processing' + task = 'Failed download processing' if self.any_vid_processed: - self._log_helper(u'Successful %s: (%s, %s)' - % (task.lower(), str(nzb_name), dir_name), logger.MESSAGE) + self._log_helper(f'Successful {task.lower()}: ({str(nzb_name)}, {dir_name})', logger.MESSAGE) else: - self._log_helper(u'%s failed: (%s, %s): %s' - % (task, str(nzb_name), dir_name, process_fail_message), logger.WARNING) + self._log_helper(f'{task} failed: ({str(nzb_name)}, {dir_name}): {process_fail_message}', + logger.WARNING) def process_minimal(self, nzb_name, show_obj, failed, webhandler): if failed: diff --git a/sickgear/properFinder.py b/sickgear/properFinder.py index 9d66fd5d..12a07326 100644 --- a/sickgear/properFinder.py +++ b/sickgear/properFinder.py @@ -185,7 +185,7 @@ def load_webdl_types(): try: for line in url_data.splitlines(): try: - (key, val) = line.strip().split(u'::', 1) + (key, val) = line.strip().split('::', 1) except (BaseException, Exception): continue if None is key or None is val: @@ -218,10 +218,10 @@ def _search_provider(cur_provider, provider_propers, aired_since_shows, recent_s provider_propers.extend(cur_provider.find_propers(search_date=aired_since_shows, shows=recent_shows, anime=recent_anime)) except AuthException as e: - logger.log('Authentication error: %s' % ex(e), logger.ERROR) + logger.error('Authentication error: %s' % ex(e)) except (BaseException, Exception) as e: - logger.log('Error while searching %s, skipping: %s' % (cur_provider.name, ex(e)), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error while searching %s, skipping: %s' % (cur_provider.name, ex(e))) + logger.error(traceback.format_exc()) if not provider_propers: logger.log('No Proper releases found at [%s]' % cur_provider.name) @@ -306,8 +306,8 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime cur_proper.parsed_show_obj = (cur_proper.parsed_show_obj or helpers.find_show_by_id(parse_result.show_obj.tvid_prodid)) if None is cur_proper.parsed_show_obj: - logger.log('Skip download; cannot find show with ID [%s] at %s' % - (cur_proper.prodid, sickgear.TVInfoAPI(cur_proper.tvid).name), logger.ERROR) + logger.error('Skip download; cannot find show with ID [%s] at %s' % + (cur_proper.prodid, sickgear.TVInfoAPI(cur_proper.tvid).name)) continue cur_proper.tvid = cur_proper.parsed_show_obj.tvid @@ -319,26 +319,25 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime # only get anime Proper if it has release group and version if 
parse_result.is_anime and not parse_result.release_group and -1 == parse_result.version: - logger.log('Ignored Proper with no release group and version in name [%s]' % cur_proper.name, - logger.DEBUG) + logger.debug('Ignored Proper with no release group and version in name [%s]' % cur_proper.name) continue if not show_name_helpers.pass_wordlist_checks(cur_proper.name, parse=False, indexer_lookup=False, show_obj=cur_proper.parsed_show_obj): - logger.log('Ignored unwanted Proper [%s]' % cur_proper.name, logger.DEBUG) + logger.debug('Ignored unwanted Proper [%s]' % cur_proper.name) continue re_x = dict(re_prefix='.*', re_suffix='.*') result = show_name_helpers.contains_any(cur_proper.name, cur_proper.parsed_show_obj.rls_ignore_words, rx=cur_proper.parsed_show_obj.rls_ignore_words_regex, **re_x) if None is not result and result: - logger.log('Ignored Proper containing ignore word [%s]' % cur_proper.name, logger.DEBUG) + logger.debug('Ignored Proper containing ignore word [%s]' % cur_proper.name) continue result = show_name_helpers.contains_any(cur_proper.name, cur_proper.parsed_show_obj.rls_require_words, rx=cur_proper.parsed_show_obj.rls_require_words_regex, **re_x) if None is not result and not result: - logger.log('Ignored Proper for not containing any required word [%s]' % cur_proper.name, logger.DEBUG) + logger.debug('Ignored Proper for not containing any required word [%s]' % cur_proper.name) continue cur_size = getattr(cur_proper, 'size', None) @@ -419,15 +418,15 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime old_webdl_type = get_webdl_type(old_extra_no_name, old_name) new_webdl_type = get_webdl_type(parse_result.extra_info_no_name(), cur_proper.name) if old_webdl_type != new_webdl_type: - logger.log('Ignored Proper webdl source [%s], does not match existing webdl source [%s] for [%s]' - % (old_webdl_type, new_webdl_type, cur_proper.name), logger.DEBUG) + logger.debug(f'Ignored Proper webdl source [{old_webdl_type}], does not match existing webdl source' + f' [{new_webdl_type}] for [{cur_proper.name}]') continue # for webdls, prevent Propers from different groups log_same_grp = 'Ignored Proper from release group [%s] does not match existing group [%s] for [%s]' \ % (parse_result.release_group, old_release_group, cur_proper.name) if sickgear.PROPERS_WEBDL_ONEGRP and is_web and not same_release_group: - logger.log(log_same_grp, logger.DEBUG) + logger.debug(log_same_grp) continue # check if we actually want this Proper (if it's the right release group and a higher version) @@ -436,7 +435,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime if not (-1 < old_version < parse_result.version): continue if not same_release_group: - logger.log(log_same_grp, logger.DEBUG) + logger.debug(log_same_grp) continue found_msg = 'Found anime Proper v%s to replace v%s' % (parse_result.version, old_version) else: @@ -454,7 +453,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime # skip if the episode has never downloaded, because a previous quality is required to match the Proper if not len(history_results): - logger.log('Ignored Proper cannot find a recent history item for [%s]' % cur_proper.name, logger.DEBUG) + logger.debug('Ignored Proper cannot find a recent history item for [%s]' % cur_proper.name) continue # make sure that none of the existing history downloads are the same Proper as the download candidate @@ -471,7 +470,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime logger.log('Ignored Proper already in history [%s]' 
% cur_proper.name) continue - logger.log(found_msg, logger.DEBUG) + logger.debug(found_msg) # finish populating the Proper instance # cur_proper.show_obj = cur_proper.parsed_show_obj.prodid @@ -557,16 +556,14 @@ def _download_propers(proper_list): if reject: if isinstance(reject, string_types): if scene_rej_nuked and not scene_nuked_active: - logger.log('Rejecting nuked release. Nuke reason [%s] source [%s]' % (reject, url), - logger.DEBUG) + logger.debug('Rejecting nuked release. Nuke reason [%s] source [%s]' % (reject, url)) else: - logger.log('Considering nuked release. Nuke reason [%s] source [%s]' % (reject, url), - logger.DEBUG) + logger.debug('Considering nuked release. Nuke reason [%s] source [%s]' % (reject, url)) reject = False elif scene_contains or non_scene_fallback: reject = False else: - logger.log('Rejecting as not scene release listed at any [%s]' % url, logger.DEBUG) + logger.debug('Rejecting as not scene release listed at any [%s]' % url) if reject: continue @@ -685,7 +682,7 @@ def _generic_name(name): def _set_last_proper_search(when): - logger.log(u'Setting the last Proper search in the DB to %s' % when, logger.DEBUG) + logger.debug(f'Setting the last Proper search in the DB to {when}') my_db = db.DBConnection() sql_result = my_db.select('SELECT * FROM info') diff --git a/sickgear/providers/__init__.py b/sickgear/providers/__init__.py index 1695162f..ecc0b6d4 100644 --- a/sickgear/providers/__init__.py +++ b/sickgear/providers/__init__.py @@ -177,7 +177,7 @@ def _create_newznab_source(config_string): except IndexError: params.update({k: d}) else: - logger.log(u'Skipping Newznab provider string: \'%s\', incorrect format' % config_string, logger.ERROR) + logger.error(f'Skipping Newznab provider string: \'{config_string}\', incorrect format') return None newznab_module = sys.modules['sickgear.providers.newznab'] @@ -213,8 +213,7 @@ def _create_torrent_rss_source(config_string): url = values[1] enabled = values[3] except ValueError: - logger.log(u"Skipping RSS Torrent provider string: '" + config_string + "', incorrect format", - logger.ERROR) + logger.error(f'Skipping RSS Torrent provider string: \'{config_string}\', incorrect format') return None try: diff --git a/sickgear/providers/alpharatio.py b/sickgear/providers/alpharatio.py index eb4e9a2e..bbb46c0d 100644 --- a/sickgear/providers/alpharatio.py +++ b/sickgear/providers/alpharatio.py @@ -105,7 +105,7 @@ class AlphaRatioProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/bithdtv.py b/sickgear/providers/bithdtv.py index 86e37964..b620519a 100644 --- a/sickgear/providers/bithdtv.py +++ b/sickgear/providers/bithdtv.py @@ -48,7 +48,7 @@ class BitHDTVProvider(generic.TorrentProvider): [(None is y or re.search(r'(?i)rss\slink', y)), self.has_all_cookies(['su', 'sp', 'sl'], 'h_'), 'search' in self.urls] + [(self.session.cookies.get('h_' + x) or 'sg!no!pw') in self.digest for x in ('su', 'sp', 'sl')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. 
Check settings'))
+            failed_msg=(lambda y=None: 'Invalid cookie details for %s.
Check settings')) @staticmethod def _has_signature(data=None): @@ -110,7 +110,7 @@ class BitHDTVProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/blutopia.py b/sickgear/providers/blutopia.py index c8458a22..b69664b1 100644 --- a/sickgear/providers/blutopia.py +++ b/sickgear/providers/blutopia.py @@ -54,7 +54,7 @@ class BlutopiaProvider(generic.TorrentProvider): def _authorised(self, **kwargs): return super(BlutopiaProvider, self)._authorised( - logged_in=self.logged_in, failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + logged_in=self.logged_in, failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) def logged_in(self, resp=None): @@ -102,7 +102,7 @@ class BlutopiaProvider(generic.TorrentProvider): show_type = self.show_obj.air_by_date and 'Air By Date' \ or self.show_obj.is_sports and 'Sports' or None if show_type: - logger.log(u'Provider does not carry shows of type: [%s], skipping' % show_type, logger.DEBUG) + logger.debug(f'Provider does not carry shows of type: [{show_type}], skipping') return results for search_string in search_params[mode]: @@ -159,7 +159,7 @@ class BlutopiaProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) diff --git a/sickgear/providers/btn.py b/sickgear/providers/btn.py index be0fb5da..6b87bff9 100644 --- a/sickgear/providers/btn.py +++ b/sickgear/providers/btn.py @@ -75,8 +75,7 @@ class BTNProvider(generic.TorrentProvider): self.tmr_limit_update('1', 'h', '150/hr %s' % data) self.log_failure_url(url, post_data, post_json) else: - logger.log(u'Action prematurely ended. %(prov)s server error response = %(desc)s' % - {'prov': self.name, 'desc': data}, logger.WARNING) + logger.warning(f'Action prematurely ended. {self.name} server error response = {data}') def _search_provider(self, search_params, age=0, **kwargs): @@ -118,7 +117,7 @@ class BTNProvider(generic.TorrentProvider): self._check_response(error_text, self.url_api, post_data=json_rpc(params)) return results except AuthException: - logger.log('API looks to be down, add un/pw config detail to be used as a fallback', logger.WARNING) + logger.warning('API looks to be down, add un/pw config detail to be used as a fallback') except (KeyError, Exception): pass @@ -247,7 +246,7 @@ class BTNProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: %s' % traceback.format_exc(), logger.ERROR)
+                logger.error(f'Failed to parse.
Traceback: {traceback.format_exc()}') self._log_search(mode, len(results) - cnt, search_url) @@ -267,7 +266,7 @@ class BTNProvider(generic.TorrentProvider): else: # If we don't have a release name we need to get creative - title = u'' + title = '' keys = ['Series', 'GroupName', 'Resolution', 'Source', 'Codec'] for key in keys: if key in data_json: @@ -353,8 +352,8 @@ class BTNProvider(generic.TorrentProvider): # Set maximum to 24 hours (24 * 60 * 60 = 86400 seconds) of "RSS" data search, # older items will be done through backlog if 86400 < seconds_since_last_update: - logger.log(u'Only trying to fetch the last 24 hours even though the last known successful update on ' + - '%s was over 24 hours' % self.name, logger.WARNING) + logger.warning(f'Only trying to fetch the last 24 hours even though the last known successful update on' + f' {self.name} was over 24 hours') seconds_since_last_update = 86400 return self._search_provider(dict(Cache=['']), age=seconds_since_last_update) diff --git a/sickgear/providers/eztv.py b/sickgear/providers/eztv.py index 5a723b1b..780d6ebf 100644 --- a/sickgear/providers/eztv.py +++ b/sickgear/providers/eztv.py @@ -106,7 +106,7 @@ class EztvProvider(generic.TorrentProvider): except (generic.HaltParseException, IndexError): pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/fano.py b/sickgear/providers/fano.py index ebb34fc8..471518f4 100644 --- a/sickgear/providers/fano.py +++ b/sickgear/providers/fano.py @@ -122,7 +122,7 @@ class FanoProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) diff --git a/sickgear/providers/filelist.py b/sickgear/providers/filelist.py index 7c883c91..2042f4fc 100644 --- a/sickgear/providers/filelist.py +++ b/sickgear/providers/filelist.py @@ -96,7 +96,7 @@ class FLProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, self.session.response.get('url')) diff --git a/sickgear/providers/filesharingtalk.py b/sickgear/providers/filesharingtalk.py index e97a69ec..1030e272 100644 --- a/sickgear/providers/filesharingtalk.py +++ b/sickgear/providers/filesharingtalk.py @@ -80,7 +80,7 @@ class FSTProvider(generic.NZBProvider): success, msg = self._check_cookie() if success: return False - logger.warning(u'%s: %s' % (msg, self.cookies)) + logger.warning(f'{msg}: {self.cookies}') self.cookies = None return None @@ -166,7 +166,7 @@ class FSTProvider(generic.NZBProvider): time.sleep(1.1) pass except (BaseException, Exception): - logger.error(u'Failed to parse. Traceback: %s' % traceback.format_exc()) + logger.error(f'Failed to parse. 
Traceback: %s' % traceback.format_exc())
+            logger.error(f'Failed to parse.
Traceback: {traceback.format_exc()}') self._log_search((mode, search_mode)['Propers' == search_mode], len(results) - cnt, search_url) return results diff --git a/sickgear/providers/funfile.py b/sickgear/providers/funfile.py index e8aecaa0..f7e93b7b 100644 --- a/sickgear/providers/funfile.py +++ b/sickgear/providers/funfile.py @@ -106,7 +106,7 @@ class FunFileProvider(generic.TorrentProvider): except (generic.HaltParseException, AttributeError): pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py index eca6d7a2..17c00f20 100644 --- a/sickgear/providers/generic.py +++ b/sickgear/providers/generic.py @@ -166,9 +166,8 @@ class ProviderFailList(object): with self.lock: self.dirty = True self._fails.append(fail) - logger.log('Adding fail.%s for %s' % (ProviderFailTypes.names.get( - fail.fail_type, ProviderFailTypes.names[ProviderFailTypes.other]), self.provider_name()), - logger.DEBUG) + logger.debug('Adding fail.%s for %s' % (ProviderFailTypes.names.get( + fail.fail_type, ProviderFailTypes.names[ProviderFailTypes.other]), self.provider_name())) self.save_list() def save_list(self): @@ -426,8 +425,8 @@ class GenericProvider(object): if not limit_set: time_index = self.fail_time_index(base_limit=0) self.tmr_limit_wait = self.wait_time(time_index) - logger.log('Request limit reached. Waiting for %s until next retry. Message: %s' % - (self.tmr_limit_wait, desc or 'none found'), logger.WARNING) + logger.warning(f'Request limit reached. Waiting for {self.tmr_limit_wait} until next retry.' + f' Message: {desc or "none found"}') def wait_time(self, time_index=None): # type: (Optional[int]) -> datetime.timedelta @@ -503,8 +502,8 @@ class GenericProvider(object): # Ensure provider name output (e.g. when displaying config/provs) instead of e.g. 
thread "Tornado" prepend = ('[%s] :: ' % self.name, '')[any(x.name in threading.current_thread().name for x in sickgear.providers.sorted_sources())] - logger.log('%sToo many requests reached at %s, waiting for %s' % ( - prepend, self.fmt_delta(self.tmr_limit_time), self.fmt_delta(time_left)), logger.WARNING) + logger.warning(f'{prepend}Too many requests reached at {self.fmt_delta(self.tmr_limit_time)},' + f' waiting for {self.fmt_delta(time_left)}') return use_tmr_limit else: self.tmr_limit_time = None @@ -515,10 +514,9 @@ class GenericProvider(object): if self.is_waiting(): if log_warning: time_left = self.wait_time() - self.fail_newest_delta() - logger.log('Failed %s times, skipping provider for %s, last failure at %s with fail type: %s' % ( + logger.warning('Failed %s times, skipping provider for %s, last failure at %s with fail type: %s' % ( self.failure_count, self.fmt_delta(time_left), self.fmt_delta(self.failure_time), - ProviderFailTypes.names.get( - self.last_fail, ProviderFailTypes.names[ProviderFailTypes.other])), logger.WARNING) + ProviderFailTypes.names.get(self.last_fail, ProviderFailTypes.names[ProviderFailTypes.other]))) return True return False @@ -533,7 +531,7 @@ class GenericProvider(object): self._last_fail_type = fail_type self.fails.add_fail(*args, **kwargs) else: - logger.log('%s: Not logging same failure within 3 seconds' % self.name, logger.DEBUG) + logger.debug('%s: Not logging same failure within 3 seconds' % self.name) def get_url(self, url, skip_auth=False, use_tmr_limit=True, *args, **kwargs): # type: (AnyStr, bool, bool, Any, Any) -> Optional[AnyStr, Dict] @@ -580,7 +578,7 @@ class GenericProvider(object): if data and not isinstance(data, tuple) \ or isinstance(data, tuple) and data[0]: if 0 != self.failure_count: - logger.log('Unblocking provider: %s' % self.get_id(), logger.DEBUG) + logger.debug('Unblocking provider: %s' % self.get_id()) self.failure_count = 0 self.failure_time = None else: @@ -628,7 +626,7 @@ class GenericProvider(object): post += [' .. Post params: [%s]' % '&'.join([post_data])] if post_json: post += [' .. Json params: [%s]' % '&'.join([post_json])] - logger.log('Failure URL: %s%s' % (url, ''.join(post)), logger.WARNING) + logger.warning('Failure URL: %s%s' % (url, ''.join(post))) def get_id(self): # type: (...) 
-> AnyStr
@@ -812,7 +810,7 @@ class GenericProvider(object):
         if not btih or not re.search('(?i)[0-9a-f]{32,40}', btih):
             assert not result.url.startswith('http')
-            logger.log('Unable to extract torrent hash from link: ' + ex(result.url), logger.ERROR)
+            logger.error('Unable to extract torrent hash from link: ' + ex(result.url))
             return False
         urls = ['http%s://%s/torrent/%s.torrent' % (u + (btih.upper(),))
@@ -846,14 +844,14 @@ class GenericProvider(object):
                                 failure_monitor=False):
                     if self._verify_download(cache_file):
-                        logger.log(u'Downloaded %s result from %s' % (self.name, url))
+                        logger.log(f'Downloaded {self.name} result from {url}')
                         try:
                             helpers.move_file(cache_file, final_file)
                             msg = 'moved'
                         except (OSError, Exception):
                             msg = 'copied cached file'
-                        logger.log(u'Saved .%s data and %s to %s' % (
-                            (link_type, 'torrent cache')['magnet' == link_type], msg, final_file))
+                        logger.log(f'Saved .{(link_type, "torrent cache")["magnet" == link_type]} data'
+                                   f' and {msg} to {final_file}')
                         saved = True
                         break
@@ -866,7 +864,7 @@ class GenericProvider(object):
                 del(self.session.headers['Referer'])
         if not saved and 'magnet' == link_type:
-            logger.log(u'All torrent cache servers failed to return a downloadable result', logger.DEBUG)
+            logger.debug('All torrent cache servers failed to return a downloadable result')
             final_file = os.path.join(final_dir, '%s.%s' % (helpers.sanitize_filename(result.name), link_type))
             try:
                 with open(final_file, 'wb') as fp:
@@ -874,12 +872,12 @@
                     fp.flush()
                     os.fsync(fp.fileno())
                     saved = True
-                    logger.log(u'Saved magnet link to file as some clients (or plugins) support this, %s' % final_file)
+                    logger.log(f'Saved magnet link to file as some clients (or plugins) support this, {final_file}')
                     if 'blackhole' == sickgear.TORRENT_METHOD:
                         logger.log('Tip: If your client fails to load magnet in files, ' +
                                    'change blackhole to a client connection method in search settings')
             except (BaseException, Exception):
-                logger.log(u'Failed to save magnet link to file, %s' % final_file)
+                logger.log(f'Failed to save magnet link to file, {final_file}')
         elif not saved:
             if 'torrent' == link_type and result.provider.get_id() in sickgear.PROVIDER_HOMES:
                 t_result = result  # type: TorrentSearchResult
@@ -895,7 +893,7 @@
                     t_result.provider._valid_home(url_exclude=urls)
                     setattr(sickgear, 'PROVIDER_EXCLUDE', ([], urls)[any([t_result.provider.url])])
-            logger.error('Server failed to return anything useful')
+            logger.error('Server failed to return anything useful')
 
         return saved
@@ -969,7 +967,7 @@ class GenericProvider(object):
         except (BaseException, Exception):
             pass
-        title = title and re.sub(r'\s+', '.', u'%s' % title)
+        title = title and re.sub(r'\s+', '.', f'{title}')
         if url and not re.match('(?i)magnet:', url):
             url = str(url).replace('&amp;', '&')
@@ -1193,10 +1191,10 @@ class GenericProvider(object):
             try:
                 parse_result = parser.parse(title, release_group=self.get_id())
             except InvalidNameException:
-                logger.log(u'Unable to parse the filename %s into a valid episode' % title, logger.DEBUG)
+                logger.debug(f'Unable to parse the filename {title} into a valid episode')
                 continue
             except InvalidShowException:
-                logger.log(u'No match for search criteria in the parsed filename ' + title, logger.DEBUG)
+                logger.debug(f'No match for search criteria in the parsed filename {title}')
                 continue
 
             if parse_result.show_obj.is_anime:
@@ -1208,8 +1206,8 @@
                     continue
             if not (parse_result.show_obj.tvid == show_obj.tvid and
parse_result.show_obj.prodid == show_obj.prodid): - logger.debug(u'Parsed show [%s] is not show [%s] we are searching for' % ( - parse_result.show_obj.unique_name, show_obj.unique_name)) + logger.debug(f'Parsed show [{parse_result.show_obj.unique_name}] is not show [{show_obj.unique_name}]' + f' we are searching for') continue parsed_show_obj = parse_result.show_obj @@ -1223,15 +1221,15 @@ class GenericProvider(object): if not (parsed_show_obj.air_by_date or parsed_show_obj.is_sports): if 'sponly' == search_mode: if len(parse_result.episode_numbers): - logger.log(u'This is supposed to be a season pack search but the result ' + title + - u' is not a valid season pack, skipping it', logger.DEBUG) + logger.debug(f'This is supposed to be a season pack search but the result {title}' + f' is not a valid season pack, skipping it') add_cache_entry = True if len(parse_result.episode_numbers) \ and (parse_result.season_number not in set([ep_obj.season for ep_obj in ep_obj_list]) or not [ep_obj for ep_obj in ep_obj_list if ep_obj.scene_episode in parse_result.episode_numbers]): - logger.log(u'The result ' + title + u' doesn\'t seem to be a valid episode that we are trying' + - u' to snatch, ignoring', logger.DEBUG) + logger.debug(f'The result {title} doesn\'t seem to be a valid episode that we are trying' + f' to snatch, ignoring') add_cache_entry = True else: if not len(parse_result.episode_numbers)\ @@ -1239,14 +1237,14 @@ class GenericProvider(object): and not [ep_obj for ep_obj in ep_obj_list if ep_obj.season == parse_result.season_number and ep_obj.episode in parse_result.episode_numbers]: - logger.log(u'The result ' + title + u' doesn\'t seem to be a valid season that we are trying' + - u' to snatch, ignoring', logger.DEBUG) + logger.debug(f'The result {title} doesn\'t seem to be a valid season that we are trying' + f' to snatch, ignoring') add_cache_entry = True elif len(parse_result.episode_numbers) and not [ ep_obj for ep_obj in ep_obj_list if ep_obj.season == parse_result.season_number and ep_obj.episode in parse_result.episode_numbers]: - logger.log(u'The result ' + title + ' doesn\'t seem to be a valid episode that we are trying' + - u' to snatch, ignoring', logger.DEBUG) + logger.debug(f'The result {title} doesn\'t seem to be a valid episode that we are trying' + f' to snatch, ignoring') add_cache_entry = True if not add_cache_entry: @@ -1255,8 +1253,8 @@ class GenericProvider(object): episode_numbers = parse_result.episode_numbers else: if not parse_result.is_air_by_date: - logger.log(u'This is supposed to be a date search but the result ' + title + - u' didn\'t parse as one, skipping it', logger.DEBUG) + logger.debug(f'This is supposed to be a date search but the result {title}' + f' didn\'t parse as one, skipping it') add_cache_entry = True else: season_number = parse_result.season_number @@ -1265,13 +1263,13 @@ class GenericProvider(object): if not episode_numbers or \ not [ep_obj for ep_obj in ep_obj_list if ep_obj.season == season_number and ep_obj.episode in episode_numbers]: - logger.log(u'The result ' + title + ' doesn\'t seem to be a valid episode that we are trying' + - u' to snatch, ignoring', logger.DEBUG) + logger.debug(f'The result {title} doesn\'t seem to be a valid episode that we are trying' + f' to snatch, ignoring') add_cache_entry = True # add parsed result to cache for usage later on if add_cache_entry: - logger.log(u'Adding item from search to cache: ' + title, logger.DEBUG) + logger.debug(f'Adding item from search to cache: {title}') ci = 
self.cache.add_cache_entry(title, url, parse_result=parse_result) if None is not ci: cl.append(ci) @@ -1288,11 +1286,11 @@ class GenericProvider(object): multi_ep = 1 < len(episode_numbers) if not want_ep: - logger.log(u'Ignoring result %s because we don\'t want an episode that is %s' - % (title, Quality.qualityStrings[quality]), logger.DEBUG) + logger.debug(f'Ignoring result {title} because we don\'t want an episode that is' + f' {Quality.qualityStrings[quality]}') continue - logger.log(u'Found result %s at %s' % (title, url), logger.DEBUG) + logger.debug(f'Found result {title} at {url}') # make a result object ep_obj_results = [] # type: List[TVEpisode] @@ -1317,14 +1315,14 @@ class GenericProvider(object): ep_num = None if 1 == len(ep_obj_results): ep_num = ep_obj_results[0].episode - logger.log(u'Single episode result.', logger.DEBUG) + logger.debug('Single episode result.') elif 1 < len(ep_obj_results): ep_num = MULTI_EP_RESULT - logger.log(u'Separating multi-episode result to check for later - result contains episodes: ' + - str(parse_result.episode_numbers), logger.DEBUG) + logger.debug(f'Separating multi-episode result to check for later - result contains episodes:' + f' {parse_result.episode_numbers}') elif 0 == len(ep_obj_results): ep_num = SEASON_RESULT - logger.log(u'Separating full season result to check for later', logger.DEBUG) + logger.debug('Separating full season result to check for later') if ep_num not in results: # noinspection PyTypeChecker @@ -1390,7 +1388,7 @@ class GenericProvider(object): if not self.should_skip(): str1, thing, str3 = (('', '%s item' % mode.lower(), ''), (' usable', 'proper', ' found'))['Propers' == mode] - logger.log((u'%s %s in response%s from %s' % (('No' + str1, count)[0 < count], ( + logger.log(('%s %s in response%s from %s' % (('No' + str1, count)[0 < count], ( '%s%s%s%s' % (('', 'freeleech ')[getattr(self, 'freeleech', False)], thing, maybe_plural(count), str3)), ('', ' (rejects: %s)' % rejects)[bool(rejects)], re.sub(r'(\s)\s+', r'\1', url))).replace('%%', '%')) @@ -1412,9 +1410,9 @@ class GenericProvider(object): reqd = 'cf_clearance' if reqd in ui_string_method(key) and reqd not in cookies: return False, \ - u'%(p)s Cookies setting require %(r)s. If %(r)s not found in browser, log out,' \ - u' delete site cookies, refresh browser, %(r)s should be created' % \ - dict(p=self.name, r='\'%s\'' % reqd) + '%(p)s Cookies setting require %(r)s. 
If %(r)s not found in browser, log out,' \ + ' delete site cookies, refresh browser, %(r)s should be created' % \ + dict(p=self.name, r='\'%s\'' % reqd) cj = requests.utils.add_dict_to_cookiejar(self.session.cookies, dict([x.strip().split('=', 1) for x in cookies.split(';') @@ -1586,7 +1584,7 @@ class NZBProvider(GenericProvider): if result_date: result_date = datetime.datetime(*result_date[0:6]) else: - logger.log(u'Unable to figure out the date for entry %s, skipping it' % title) + logger.log(f'Unable to figure out the date for entry {title}, skipping it') continue if not search_date or search_date < result_date: @@ -1918,7 +1916,7 @@ class TorrentProvider(GenericProvider): success, msg = self._check_cookie() if not success: self.cookies = None - logger.log(u'%s' % msg, logger.WARNING) + logger.warning(f'{msg}') return url_base = getattr(self, 'url_base', None) @@ -1998,12 +1996,12 @@ class TorrentProvider(GenericProvider): r'(?i)([1-3]((<[^>]+>)|\W)*(attempts|tries|remain)[\W\w]{,40}?(remain|left|attempt)|last[^<]+?attempt)', y)) logged_in, failed_msg = [None is not a and a or b for (a, b) in ( (logged_in, (lambda y=None: self.has_all_cookies())), - (failed_msg, (lambda y='': maxed_out(y) and u'Urgent abort, running low on login attempts. ' + - u'Password flushed to prevent service disruption to %s.' or + (failed_msg, (lambda y='': maxed_out(y) and 'Urgent abort, running low on login attempts. ' + + 'Password flushed to prevent service disruption to %s.' or (re.search(r'(?i)(username|password)((<[^>]+>)|\W)*' + r'(or|and|/|\s)((<[^>]+>)|\W)*(password|incorrect)', y) and - u'Invalid username or password for %s. Check settings' or - u'Failed to authenticate or parse a response from %s, abort provider'))) + 'Invalid username or password for %s. Check settings' or + 'Failed to authenticate or parse a response from %s, abort provider'))) )] if logged_in() and (not hasattr(self, 'urls') or bool(len(getattr(self, 'urls')))): @@ -2017,7 +2015,7 @@ class TorrentProvider(GenericProvider): if not self._check_auth(): return False except AuthException as e: - logger.log('%s' % ex(e), logger.ERROR) + logger.error('%s' % ex(e)) return False if isinstance(url, type([])): @@ -2094,7 +2092,7 @@ class TorrentProvider(GenericProvider): sickgear.save_config() msg = failed_msg(response) if msg: - logger.log(msg % self.name, logger.ERROR) + logger.error(msg % self.name) return False diff --git a/sickgear/providers/hdbits.py b/sickgear/providers/hdbits.py index 11542acd..6c8ed495 100644 --- a/sickgear/providers/hdbits.py +++ b/sickgear/providers/hdbits.py @@ -49,7 +49,7 @@ class HDBitsProvider(generic.TorrentProvider): def _check_auth_from_data(self, parsed_json): if 'status' in parsed_json and 5 == parsed_json.get('status') and 'message' in parsed_json: - logger.log(u'Incorrect username or password for %s: %s' % (self.name, parsed_json['message']), logger.DEBUG) + logger.debug(f'Incorrect username or password for {self.name}: {parsed_json["message"]}') raise AuthException('Your username or password for %s is incorrect, check your config.' 
% self.name) return True @@ -115,10 +115,10 @@ class HDBitsProvider(generic.TorrentProvider): try: if not (json_resp and self._check_auth_from_data(json_resp) and 'data' in json_resp): - logger.log(u'Response from %s does not contain any json data, abort' % self.name, logger.ERROR) + logger.error(f'Response from {self.name} does not contain any json data, abort') return results except AuthException as e: - logger.log(u'Authentication error: %s' % (ex(e)), logger.ERROR) + logger.error(f'Authentication error: {ex(e)}') return results cnt = len(items[mode]) diff --git a/sickgear/providers/hdspace.py b/sickgear/providers/hdspace.py index 103f1e46..d693d7af 100644 --- a/sickgear/providers/hdspace.py +++ b/sickgear/providers/hdspace.py @@ -128,7 +128,7 @@ class HDSpaceProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/hdtorrents.py b/sickgear/providers/hdtorrents.py index c9b88823..8fcb5067 100644 --- a/sickgear/providers/hdtorrents.py +++ b/sickgear/providers/hdtorrents.py @@ -131,7 +131,7 @@ class HDTorrentsProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) diff --git a/sickgear/providers/iptorrents.py b/sickgear/providers/iptorrents.py index c5801d64..f399be6c 100644 --- a/sickgear/providers/iptorrents.py +++ b/sickgear/providers/iptorrents.py @@ -58,7 +58,7 @@ class IPTorrentsProvider(generic.TorrentProvider): ['IPTorrents' in y, 'type="password"' not in y[0:2048], self.has_all_cookies()] + [(self.session.cookies.get(c, domain='') or 'sg!no!pw') in self.digest for c in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): @@ -154,7 +154,7 @@ class IPTorrentsProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url, log_settings_hint) if self.is_search_finished(mode, items, cnt_search, rc['id'], last_recent_search, lrs_new, lrs_found): diff --git a/sickgear/providers/limetorrents.py b/sickgear/providers/limetorrents.py index 18ee1e7b..7a429b74 100644 --- a/sickgear/providers/limetorrents.py +++ b/sickgear/providers/limetorrents.py @@ -114,7 +114,7 @@ class LimeTorrentsProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: %s' % traceback.format_exc(), logger.ERROR)
+            logger.error(f'Failed to parse.
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) @@ -131,7 +131,7 @@ class LimeTorrentsProvider(generic.TorrentProvider): try: result = re.findall('(?i)"(magnet:[^"]+?)"', html)[0] except IndexError: - logger.log('Failed no magnet in response', logger.DEBUG) + logger.debug('Failed no magnet in response') return result diff --git a/sickgear/providers/magnetdl.py b/sickgear/providers/magnetdl.py index 5bad6c03..b6ed7559 100644 --- a/sickgear/providers/magnetdl.py +++ b/sickgear/providers/magnetdl.py @@ -99,7 +99,7 @@ class MagnetDLProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/morethan.py b/sickgear/providers/morethan.py index a25b9c50..bb94c431 100644 --- a/sickgear/providers/morethan.py +++ b/sickgear/providers/morethan.py @@ -112,7 +112,7 @@ class MoreThanProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/ncore.py b/sickgear/providers/ncore.py index ffbc9513..d99e9512 100644 --- a/sickgear/providers/ncore.py +++ b/sickgear/providers/ncore.py @@ -105,7 +105,7 @@ class NcoreProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/nebulance.py b/sickgear/providers/nebulance.py index f8005eca..843beb4a 100644 --- a/sickgear/providers/nebulance.py +++ b/sickgear/providers/nebulance.py @@ -119,7 +119,7 @@ class NebulanceProvider(generic.TorrentProvider): items[mode].append((title, download_url, seeders, self._bytesizer(size))) except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: %s' % traceback.format_exc(), logger.ERROR)
+            logger.error(f'Failed to parse.
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/newznab.py b/sickgear/providers/newznab.py index 2fe12d6a..fb7cc782 100644 --- a/sickgear/providers/newznab.py +++ b/sickgear/providers/newznab.py @@ -331,7 +331,7 @@ class NewznabProvider(generic.NZBProvider): except (BaseException, Exception): continue except (BaseException, Exception): - logger.log('Error parsing result for [%s]' % self.name, logger.DEBUG) + logger.debug('Error parsing result for [%s]' % self.name) if not caps and self._caps and not all_cats and self._caps_all_cats and not cats and self._caps_cats: self._check_excludes(cats) @@ -644,14 +644,14 @@ class NewznabProvider(generic.NZBProvider): if not s.show_obj.is_anime and not s.show_obj.is_sports: if not getattr(s, 'wanted_quality', None): # this should not happen, the creation is missing for the search in this case - logger.log('wanted_quality property was missing for search, creating it', logger.WARNING) + logger.warning('wanted_quality property was missing for search, creating it') ep_status, ep_quality = Quality.split_composite_status(ep_obj.status) s.wanted_quality = get_wanted_qualities(ep_obj, ep_status, ep_quality, unaired=True) needed.check_needed_qualities(s.wanted_quality) if not hasattr(ep_obj, 'eps_aired_in_season'): # this should not happen, the creation is missing for the search in this case - logger.log('eps_aired_in_season property was missing for search, creating it', logger.WARNING) + logger.warning('eps_aired_in_season property was missing for search, creating it') ep_count, ep_count_scene = get_aired_in_season(ep_obj.show_obj) ep_obj.eps_aired_in_season = ep_count.get(ep_obj.season, 0) ep_obj.eps_aired_in_scene_season = ep_count_scene.get(ep_obj.scene_season, 0) if ep_obj.show_obj.is_scene \ @@ -978,14 +978,14 @@ class NewznabProvider(generic.NZBProvider): parsed_xml, n_spaces = self.cache.parse_and_get_ns(data) items = parsed_xml.findall('channel/item') except (BaseException, Exception): - logger.log('Error trying to load %s RSS feed' % self.name, logger.WARNING) + logger.warning('Error trying to load %s RSS feed' % self.name) break if not self._check_auth_from_data(parsed_xml, search_url): break if 'rss' != parsed_xml.tag: - logger.log('Resulting XML from %s isn\'t RSS, not parsing it' % self.name, logger.WARNING) + logger.warning('Resulting XML from %s isn\'t RSS, not parsing it' % self.name) break i and time.sleep(2.1) @@ -996,8 +996,7 @@ class NewznabProvider(generic.NZBProvider): if title and url: results.append(item) else: - logger.log('The data returned from %s is incomplete, this result is unusable' % self.name, - logger.DEBUG) + logger.debug('The data returned from %s is incomplete, this result is unusable' % self.name) # get total and offset attributes try: @@ -1036,8 +1035,8 @@ class NewznabProvider(generic.NZBProvider): # there are more items available than the amount given in one call, grab some more items = total - request_params['offset'] - logger.log('%s more item%s to fetch from a batch of up to %s items.' 
-                           % (items, helpers.maybe_plural(items), request_params['limit']), logger.DEBUG)
+                logger.debug(f'{items} more item{helpers.maybe_plural(items)} to fetch from a batch of up to'
+                             f' {request_params["limit"]} items.')
 
             batch_count = self._log_result(results, mode, cnt, search_url)
             exit_log = False
@@ -1125,7 +1124,7 @@ class NewznabProvider(generic.NZBProvider):
             result_date = self._parse_pub_date(item)
             if not result_date:
-                logger.log(u'Unable to figure out the date for entry %s, skipping it' % title)
+                logger.log(f'Unable to figure out the date for entry {title}, skipping it')
                 continue
 
             result_size, result_uid = self._parse_size_uid(item, ns=n_space)
@@ -1201,7 +1200,7 @@ class NewznabCache(tvcache.TVCache):
                 else:
                     (items, n_spaces) = self.provider.cache_data(needed=needed)
             except (BaseException, Exception) as e:
-                logger.log('Error updating Cache: %s' % ex(e), logger.ERROR)
+                logger.error('Error updating Cache: %s' % ex(e))
                 items = None
 
             if items:
@@ -1257,5 +1256,4 @@ class NewznabCache(tvcache.TVCache):
         if title and url:
             return self.add_cache_entry(title, url, tvid_prodid=ids)
 
-        logger.log('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name,
-                   logger.DEBUG)
+        logger.debug('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name)
diff --git a/sickgear/providers/nyaa.py b/sickgear/providers/nyaa.py
index 65156509..4bb3f460 100644
--- a/sickgear/providers/nyaa.py
+++ b/sickgear/providers/nyaa.py
@@ -91,7 +91,7 @@ class NyaaProvider(generic.TorrentProvider):
             except generic.HaltParseException:
                 pass
             except (BaseException, Exception):
-                logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
+                logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}')
 
             self._log_search(mode, len(items[mode]) - cnt, search_url)
diff --git a/sickgear/providers/omgwtfnzbs.py b/sickgear/providers/omgwtfnzbs.py
index 054dfad9..1d7e4bc6 100644
--- a/sickgear/providers/omgwtfnzbs.py
+++ b/sickgear/providers/omgwtfnzbs.py
@@ -87,8 +87,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
             if re.search('(?i)(information is incorrect|in(?:valid|correct).*?(?:username|api))',
                          data_json.get('notice')):
-                logger.log(u'Incorrect authentication credentials for ' + self.name + ' : ' + str(description_text),
-                           logger.DEBUG)
+                logger.debug(f'Incorrect authentication credentials for {self.name} : {description_text}')
                 raise AuthException(
                     'Your authentication credentials for ' + self.name + ' are incorrect, check your config.')
@@ -96,7 +95,7 @@
                 return True
 
             else:
-                logger.log(u'Unknown error given from ' + self.name + ' : ' + str(description_text), logger.DEBUG)
+                logger.debug(f'Unknown error given from {self.name} : {str(description_text)}')
                 return False
 
         return True
@@ -149,7 +148,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
                 self.tmr_limit_update('1', 'h', 'Your 24 hour limit of 10 NZBs has been reached')
                 self.log_failure_url(url)
             elif '</nzb>' not in data or 'seem to be logged in' in data:
-                logger.log('Failed nzb data response: %s' % data, logger.DEBUG)
+                logger.debug('Failed nzb data response: %s' % data)
             else:
                 result = data
         return result
@@ -345,7 +344,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
                     time.sleep(1.1)
                 pass
             except (BaseException, Exception):
-                logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
+                logger.error(f'Failed to parse.
Traceback: {traceback.format_exc()}') mode = (mode, search_mode)['Propers' == search_mode] self._log_search(mode, len(results) - cnt, search_url) @@ -400,7 +399,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider): if success and self.nn: success, msg = None, 'pm dev in irc about this feature' if not success: - logger.log(u'%s: %s' % (msg, self.cookies), logger.WARNING) + logger.warning(f'{msg}: {self.cookies}') self.cookies = None return None return False diff --git a/sickgear/providers/pretome.py b/sickgear/providers/pretome.py index 23d067dd..460fd807 100644 --- a/sickgear/providers/pretome.py +++ b/sickgear/providers/pretome.py @@ -100,7 +100,7 @@ class PreToMeProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.error(u'Failed to parse. Traceback: %s' % traceback.format_exc()) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/privatehd.py b/sickgear/providers/privatehd.py index 7ba28252..08ae3a7d 100644 --- a/sickgear/providers/privatehd.py +++ b/sickgear/providers/privatehd.py @@ -56,7 +56,7 @@ class PrivateHDProvider(generic.TorrentProvider): return super(PrivateHDProvider, self)._authorised( logged_in=(lambda y='': 'English' in y and 'auth/login' not in y and all( [(self.session.cookies.get('privatehdx_session', domain='') or 'sg!no!pw') in self.digest])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) def _search_provider(self, search_params, **kwargs): @@ -88,7 +88,7 @@ class PrivateHDProvider(generic.TorrentProvider): show_type = self.show_obj.air_by_date and 'Air By Date' \ or self.show_obj.is_sports and 'Sports' or self.show_obj.is_anime and 'Anime' or None if show_type: - logger.log(u'Provider does not carry shows of type: [%s], skipping' % show_type, logger.DEBUG) + logger.debug(f'Provider does not carry shows of type: [{show_type}], skipping') return results for search_string in search_params[mode]: @@ -141,7 +141,7 @@ class PrivateHDProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + search_url) diff --git a/sickgear/providers/ptf.py b/sickgear/providers/ptf.py index da1c94f2..d041d43a 100644 --- a/sickgear/providers/ptf.py +++ b/sickgear/providers/ptf.py @@ -56,7 +56,7 @@ class PTFProvider(generic.TorrentProvider): logged_in=(lambda y='': all( ['RSS Feed' in y, self.has_all_cookies('session_key')] + [(self.session.cookies.get(x) or 'sg!no!pw') in self.digest for x in ['session_key']])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) def _search_provider(self, search_params, **kwargs): @@ -144,7 +144,7 @@ class PTFProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: %s' % traceback.format_exc(), logger.ERROR)
+            logger.error(f'Failed to parse.
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, log + self.session.response.get('url')) diff --git a/sickgear/providers/rarbg.py b/sickgear/providers/rarbg.py index ab9f2ffe..8889bfc3 100644 --- a/sickgear/providers/rarbg.py +++ b/sickgear/providers/rarbg.py @@ -68,7 +68,7 @@ class RarbgProvider(generic.TorrentProvider): return True time.sleep(2) - logger.log(u'No usable API token returned from: %s' % self.urls['api_token'], logger.ERROR) + logger.error(f'No usable API token returned from: {self.urls["api_token"]}') return False @staticmethod diff --git a/sickgear/providers/revtt.py b/sickgear/providers/revtt.py index 50527f39..2e367969 100644 --- a/sickgear/providers/revtt.py +++ b/sickgear/providers/revtt.py @@ -102,7 +102,7 @@ class RevTTProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, self.session.response.get('url')) diff --git a/sickgear/providers/rsstorrent.py b/sickgear/providers/rsstorrent.py index 802eae4e..31971841 100644 --- a/sickgear/providers/rsstorrent.py +++ b/sickgear/providers/rsstorrent.py @@ -59,7 +59,7 @@ class TorrentRssProvider(generic.TorrentProvider): title, url = None, None if item.title: - title = re.sub(r'\s+', '.', u'' + item.title) + title = re.sub(r'\s+', '.', '' + item.title) attempt_list = [lambda: item.torrent_magneturi, lambda: item.enclosures[0].href, diff --git a/sickgear/providers/scenehd.py b/sickgear/providers/scenehd.py index 74da4457..a04aa810 100644 --- a/sickgear/providers/scenehd.py +++ b/sickgear/providers/scenehd.py @@ -47,7 +47,7 @@ class SceneHDProvider(generic.TorrentProvider): return super(SceneHDProvider, self)._authorised( logged_in=(lambda y='': ['RSS links' in y] and all( [(self.session.cookies.get(c, domain='') or 'sg!no!pw') in self.digest for c in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) def _search_provider(self, search_params, **kwargs): @@ -109,7 +109,7 @@ class SceneHDProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/scenetime.py b/sickgear/providers/scenetime.py index f4f783fb..edc318f0 100644 --- a/sickgear/providers/scenetime.py +++ b/sickgear/providers/scenetime.py @@ -50,7 +50,7 @@ class SceneTimeProvider(generic.TorrentProvider): ['staff-support' in y, self.has_all_cookies()] + [(self.session.cookies.get(x, domain='') or 'sg!no!pw') in self.digest for x in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): @@ -146,7 +146,7 @@ class SceneTimeProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: %s' % traceback.format_exc(), logger.ERROR)
+            logger.error(f'Failed to parse.
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url, log_settings_hint) diff --git a/sickgear/providers/shazbat.py b/sickgear/providers/shazbat.py index 3121924d..81bf520c 100644 --- a/sickgear/providers/shazbat.py +++ b/sickgear/providers/shazbat.py @@ -134,7 +134,7 @@ class ShazbatProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/showrss.py b/sickgear/providers/showrss.py index e9356e14..392e8e45 100644 --- a/sickgear/providers/showrss.py +++ b/sickgear/providers/showrss.py @@ -114,7 +114,7 @@ class ShowRSSProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/snowfl.py b/sickgear/providers/snowfl.py index 25f46c3a..b0a252ac 100644 --- a/sickgear/providers/snowfl.py +++ b/sickgear/providers/snowfl.py @@ -117,7 +117,7 @@ class SnowflProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/speedapp.py b/sickgear/providers/speedapp.py index 478e20b3..a354988a 100644 --- a/sickgear/providers/speedapp.py +++ b/sickgear/providers/speedapp.py @@ -46,7 +46,7 @@ class SpeedAppProvider(generic.TorrentProvider): return super(SpeedAppProvider, self)._authorised( logged_in=self.logged_in, parse_json=True, headers=self.auth_header(), - failed_msg=(lambda y=None: u'Invalid token or permissions for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid token or permissions for %s. Check settings')) def logged_in(self, resp=None): diff --git a/sickgear/providers/speedcd.py b/sickgear/providers/speedcd.py index 9964362a..f1a12083 100644 --- a/sickgear/providers/speedcd.py +++ b/sickgear/providers/speedcd.py @@ -94,9 +94,9 @@ class SpeedCDProvider(generic.TorrentProvider): self.digest = 'inSpeed_speedian=%s' % self.session.cookies.get('inSpeed_speedian') sickgear.save_config() result = True - logger.log('Cookie details for %s updated.' % self.name, logger.DEBUG) + logger.debug('Cookie details for %s updated.' % self.name) elif not self.failure_count: - logger.log('Invalid cookie details for %s and login failed. Check settings' % self.name, logger.ERROR) + logger.error('Invalid cookie details for %s and login failed. 
Check settings' % self.name) return result @staticmethod diff --git a/sickgear/providers/thepiratebay.py b/sickgear/providers/thepiratebay.py index bf57db9f..1e390aef 100644 --- a/sickgear/providers/thepiratebay.py +++ b/sickgear/providers/thepiratebay.py @@ -113,7 +113,7 @@ class ThePirateBayProvider(generic.TorrentProvider): if not self._reject_item(seeders, leechers): status, info_hash = [cur_item.get(k) for k in ('status', 'info_hash')] if self.confirmed and not rc['verify'].search(status): - logger.log(u'Skipping untrusted non-verified result: ' + title, logger.DEBUG) + logger.debug('Skipping untrusted non-verified result: ' + title) continue download_magnet = info_hash if '&tr=' in info_hash \ else self._dhtless_magnet(info_hash, title) @@ -236,7 +236,7 @@ class ThePirateBayProvider(generic.TorrentProvider): if self.confirmed and not ( tr.find('img', title=rc['verify']) or tr.find('img', alt=rc['verify']) or tr.find('img', src=rc['verify'])): - logger.log(u'Skipping untrusted non-verified result: ' + title, logger.DEBUG) + logger.debug('Skipping untrusted non-verified result: ' + title) continue if title and download_magnet: @@ -245,7 +245,7 @@ class ThePirateBayProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) results = self._sort_seeding(mode, results + items[mode]) diff --git a/sickgear/providers/torlock.py b/sickgear/providers/torlock.py index 79374449..3ddb1346 100644 --- a/sickgear/providers/torlock.py +++ b/sickgear/providers/torlock.py @@ -121,7 +121,7 @@ class TorLockProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/torrentday.py b/sickgear/providers/torrentday.py index 3badba6f..2e0d751d 100644 --- a/sickgear/providers/torrentday.py +++ b/sickgear/providers/torrentday.py @@ -57,7 +57,7 @@ class TorrentDayProvider(generic.TorrentProvider): ['RSS URL' in y, self.has_all_cookies()] + [(self.session.cookies.get(c, domain='') or 'sg!no!pw') in self.digest for c in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): diff --git a/sickgear/providers/torrenting.py b/sickgear/providers/torrenting.py index 0870d459..c8c5c1b0 100644 --- a/sickgear/providers/torrenting.py +++ b/sickgear/providers/torrenting.py @@ -47,7 +47,7 @@ class TorrentingProvider(generic.TorrentProvider): logged_in=(lambda y='': all( ['RSS link' in y, self.has_all_cookies()] + [(self.session.cookies.get(x) or 'sg!no!pw') in self.digest for x in ('uid', 'pass')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): @@ -107,7 +107,7 @@ class TorrentingProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. 
Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, search_url) diff --git a/sickgear/providers/torrentleech.py b/sickgear/providers/torrentleech.py index 148353f9..faeecb4d 100644 --- a/sickgear/providers/torrentleech.py +++ b/sickgear/providers/torrentleech.py @@ -44,7 +44,7 @@ class TorrentLeechProvider(generic.TorrentProvider): return super(TorrentLeechProvider, self)._authorised( logged_in=(lambda y='': all( ['TorrentLeech' in y, 'type="password"' not in y[0:4096], self.has_all_cookies(pre='tl')])), - failed_msg=(lambda y=None: u'Invalid cookie details for %s. Check settings')) + failed_msg=(lambda y=None: 'Invalid cookie details for %s. Check settings')) @staticmethod def _has_signature(data=None): diff --git a/sickgear/providers/tvchaosuk.py b/sickgear/providers/tvchaosuk.py index 8897cf92..c7576fd1 100644 --- a/sickgear/providers/tvchaosuk.py +++ b/sickgear/providers/tvchaosuk.py @@ -142,7 +142,7 @@ class TVChaosUKProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. Traceback: {traceback.format_exc()}') if soup: soup.clear(True) diff --git a/sickgear/providers/xspeeds.py b/sickgear/providers/xspeeds.py index e500b438..8b3a7487 100644 --- a/sickgear/providers/xspeeds.py +++ b/sickgear/providers/xspeeds.py @@ -67,7 +67,7 @@ class XspeedsProvider(generic.TorrentProvider): if self.should_skip(): return results for search_string in search_params[mode]: - search_string = search_string.replace(u'£', '%') + search_string = search_string.replace('£', '%') search_string = re.sub(r'[\s.]+', '%', search_string) kwargs = dict(post_data={'keywords': search_string, 'do': 'quick_sort', 'page': '0', @@ -131,7 +131,7 @@ class XspeedsProvider(generic.TorrentProvider): except generic.HaltParseException: pass except (BaseException, Exception): - logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) + logger.error(f'Failed to parse. 
Traceback: %s' % traceback.format_exc(), logger.ERROR)
+            logger.error(f'Failed to parse.
Traceback: {traceback.format_exc()}') self._log_search(mode, len(items[mode]) - cnt, ('search string: ' + search_string.replace('%', '%%'), self.name)['Cache' == mode]) diff --git a/sickgear/rssfeeds.py b/sickgear/rssfeeds.py index dd24463c..f91b67fe 100644 --- a/sickgear/rssfeeds.py +++ b/sickgear/rssfeeds.py @@ -30,9 +30,9 @@ class RSSFeeds(object): if data and 'error' in data.feed: err_code = data.feed['error']['code'] err_desc = data.feed['error']['description'] - logger.log(u'RSS error:[%s] code:[%s]' % (err_desc, err_code), logger.DEBUG) + logger.debug(f'RSS error:[{err_desc}] code:[{err_code}]') else: - logger.log(u'RSS error loading url: ' + url, logger.DEBUG) + logger.debug(f'RSS error loading url: {url}') except (BaseException, Exception) as e: - logger.log(u'RSS error: ' + ex(e), logger.DEBUG) + logger.debug(f'RSS error: {ex(e)}') diff --git a/sickgear/sab.py b/sickgear/sab.py index 8efa531b..e98b69f4 100644 --- a/sickgear/sab.py +++ b/sickgear/sab.py @@ -67,10 +67,10 @@ def send_nzb(search_result): return False kwargs['files'] = {'nzbfile': ('%s.nzb' % search_result.name, nzb_data)} - logger.log(u'Sending %s to SABnzbd: %s' % (nzb_type, search_result.name)) + logger.log(f'Sending {nzb_type} to SABnzbd: {search_result.name}') url = '%sapi' % sickgear.SAB_HOST - logger.log(u'SABnzbd at %s sent params: %s' % (url, params), logger.DEBUG) + logger.debug(f'SABnzbd at {url} sent params: {params}') success, result = _get_url(url, **kwargs) if not success: @@ -78,23 +78,23 @@ def send_nzb(search_result): # do some crude parsing of the result text to determine what SAB said if result.get('status'): - logger.log(u'Success from SABnzbd using %s' % nzb_type, logger.DEBUG) + logger.debug(f'Success from SABnzbd using {nzb_type}') return True elif 'error' in result: - logger.log(u'Failed using %s with SABnzbd, response: %s' % (nzb_type, result.get('error', 'und')), logger.ERROR) + logger.error(f'Failed using {nzb_type} with SABnzbd, response: {result.get("error", "und")}') else: - logger.log(u'Failure unknown using %s with SABnzbd, response: %s' % (nzb_type, result), logger.ERROR) + logger.error(f'Failure unknown using {nzb_type} with SABnzbd, response: {result}') return False def _check_sab_response(result): if 0 == len(result): - logger.log('No data returned from SABnzbd, nzb not used', logger.ERROR) + logger.error('No data returned from SABnzbd, nzb not used') return False, 'No data from SABnzbd' if 'error' in result: - logger.log(result['error'], logger.ERROR) + logger.error(result['error']) return False, result['error'] return True, result @@ -103,7 +103,7 @@ def _get_url(url, params=None, **kwargs): result = sickgear.helpers.get_url(url, params=params, parse_json=True, **kwargs) if None is result: - logger.log('Error, no response from SABnzbd', logger.ERROR) + logger.error('Error, no response from SABnzbd') return False, 'Error, no response from SABnzbd' return True, result @@ -132,7 +132,7 @@ def test_authentication(host=None, username=None, password=None, apikey=None): url = '%sapi' % host # send the test request - logger.log(u'SABnzbd test URL: %s with parameters: %s' % (url, params), logger.DEBUG) + logger.debug(f'SABnzbd test URL: {url} with parameters: {params}') success, result = _get_url(url, params=params) if not success: return False, result diff --git a/sickgear/scene_exceptions.py b/sickgear/scene_exceptions.py index ceaa42d4..3e4064fe 100644 --- a/sickgear/scene_exceptions.py +++ b/sickgear/scene_exceptions.py @@ -36,7 +36,7 @@ from .sgdatetime import timestamp_near import 
 
 from _23 import list_range
-from six import iteritems, text_type
+from six import iteritems
 
 # noinspection PyUnreachableCode
 if False:
@@ -229,10 +229,10 @@ def retrieve_exceptions():
     """
     global exception_dict, anidb_exception_dict, xem_exception_dict
 
-    # exceptions are stored on github pages
+    # exceptions are stored on GitHub pages
     for tvid in sickgear.TVInfoAPI().sources:
         if should_refresh(sickgear.TVInfoAPI(tvid).name):
-            logger.log(u'Checking for scene exception updates for %s' % sickgear.TVInfoAPI(tvid).name)
+            logger.log(f'Checking for scene exception updates for {sickgear.TVInfoAPI(tvid).name}')
 
             url = sickgear.TVInfoAPI(tvid).config.get('scene_url')
             if not url:
@@ -241,7 +241,7 @@ def retrieve_exceptions():
             url_data = helpers.get_url(url)
             if None is url_data:
                 # When None is urlData, trouble connecting to github
-                logger.log(u'Check scene exceptions update failed. Unable to get URL: %s' % url, logger.ERROR)
+                logger.error(f'Check scene exceptions update failed. Unable to get URL: {url}')
                 continue
 
             else:
@@ -307,8 +307,8 @@ def retrieve_exceptions():
                 try:
                     cur_exception, cur_season = next(iteritems(cur_exception_dict))
                 except (BaseException, Exception):
-                    logger.log('scene exception error', logger.ERROR)
-                    logger.log(traceback.format_exc(), logger.ERROR)
+                    logger.error('scene exception error')
+                    logger.error(traceback.format_exc())
                     continue
 
                 cl.append(['INSERT INTO scene_exceptions'
@@ -322,9 +322,9 @@ def retrieve_exceptions():
 
     # since this could invalidate the results of the cache we clear it out after updating
     if changed_exceptions:
-        logger.log(u'Updated scene exceptions')
+        logger.log('Updated scene exceptions')
     else:
-        logger.log(u'No scene exceptions update needed')
+        logger.log('No scene exceptions update needed')
 
     # cleanup
     exception_dict.clear()
@@ -353,14 +353,13 @@ def update_scene_exceptions(tvid, prodid, scene_exceptions):
     # A change has been made to the scene exception list. Let's clear the cache, to make this visible
     exceptionsCache[(tvid, prodid)] = defaultdict(list)
 
-    logger.log(u'Updating scene exceptions', logger.MESSAGE)
+    logger.log('Updating scene exceptions', logger.MESSAGE)
     for exception in scene_exceptions:
         cur_season, cur_exception = exception.split('|', 1)
 
         try:
             cur_season = int(cur_season)
         except (BaseException, Exception):
-            logger.log('invalid scene exception: %s - %s:%s' % ('%s:%s' % (tvid, prodid), cur_season, cur_exception),
-                       logger.ERROR)
+            logger.error('invalid scene exception: %s - %s:%s' % ('%s:%s' % (tvid, prodid), cur_season, cur_exception))
             continue
 
         exceptionsCache[(tvid, prodid)][cur_season].append(cur_exception)
@@ -377,7 +376,7 @@ def _custom_exceptions_fetcher():
     cnt_updated_numbers = 0
     src_id = 'GHSG'
-    logger.log(u'Checking to update custom alternatives from %s' % src_id)
+    logger.log(f'Checking to update custom alternatives from {src_id}')
 
     dirpath = os.path.join(sickgear.CACHE_DIR, 'alts')
     tmppath = os.path.join(dirpath, 'tmp')
@@ -399,7 +398,7 @@ def _custom_exceptions_fetcher():
                 rar_handle = rarfile.RarFile(file_rar)
                 rar_handle.extractall(path=dirpath, pwd='sickgear_alt')
             except(BaseException, Exception) as e:
-                logger.log(u'Failed to unpack archive: %s with error: %s' % (file_rar, ex(e)), logger.ERROR)
+                logger.error(f'Failed to unpack archive: {file_rar} with error: {ex(e)}')
 
             if rar_handle:
                 rar_handle.close()
@@ -411,7 +410,7 @@ def _custom_exceptions_fetcher():
         set_last_refresh(src_id)
 
     if not fetch_data and not os.path.isfile(file_cache):
-        logger.debug(u'Unable to fetch custom exceptions, skipped: %s' % file_rar)
+        logger.debug(f'Unable to fetch custom exceptions, skipped: {file_rar}')
         return custom_exception_dict, cnt_updated_numbers, should_refresh(src_id, iv, remaining=True)
 
     data = {}
@@ -419,7 +418,7 @@ def _custom_exceptions_fetcher():
         with io.open(file_cache) as fh:
             data = json_load(fh)
     except(BaseException, Exception) as e:
-        logger.log(u'Failed to unpack json data: %s with error: %s' % (file_rar, ex(e)), logger.ERROR)
+        logger.error(f'Failed to unpack json data: {file_rar} with error: {ex(e)}')
 
     # handle data
     from .scene_numbering import find_scene_numbering, set_scene_numbering_helper
@@ -459,11 +458,9 @@ def _custom_exceptions_fetcher():
                     used.add((for_season, for_episode, target_season, target_episode))
                     if sn and ((for_season, for_episode) + sn) not in used \
                             and (for_season, for_episode) not in used:
-                        logger.log(
-                            u'Skipped setting "%s" episode %sx%s to target a release %sx%s because set to %sx%s'
-                            % (show_obj.unique_name, for_season, for_episode,
-                               target_season, target_episode, sn[0], sn[1]),
-                            logger.DEBUG)
+                        logger.debug(f'Skipped setting "{show_obj.unique_name}" episode {for_season}x{for_episode}'
+                                     f' to target a release {target_season}x{target_episode}'
+                                     f' because set to {sn[0]}x{sn[1]}')
                     else:
                         used.add((for_season, for_episode))
                     if not sn or sn != (target_season, target_episode):  # not already set
@@ -482,7 +479,7 @@ def _anidb_exceptions_fetcher():
     global anidb_exception_dict
 
     if should_refresh('anidb'):
-        logger.log(u'Checking for AniDB scene exception updates')
+        logger.log('Checking for AniDB scene exception updates')
         for cur_show_obj in filter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList):
             try:
                 anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True)
@@ -506,15 +503,15 @@ def _xem_exceptions_fetcher():
     if should_refresh(xem_list):
         for tvid in [i for i in sickgear.TVInfoAPI().sources if 'xem_origin' in sickgear.TVInfoAPI(i).config]:
-            logger.log(u'Checking for XEM scene exception updates for %s' % sickgear.TVInfoAPI(tvid).name)
+            logger.log(f'Checking for XEM scene exception updates for {sickgear.TVInfoAPI(tvid).name}')
 
             url = 'https://thexem.info/map/allNames?origin=%s%s&seasonNumbers=1'\
                   % (sickgear.TVInfoAPI(tvid).config['xem_origin'], ('&language=us', '')['xem' == xem_list])
 
             parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
             if not parsed_json:
-                logger.log(u'Check scene exceptions update failed for %s, Unable to get URL: %s'
-                           % (sickgear.TVInfoAPI(tvid).name, url), logger.ERROR)
+                logger.error(f'Check scene exceptions update failed for {sickgear.TVInfoAPI(tvid).name},'
+                             f' Unable to get URL: {url}')
                 continue
 
             if 'failure' == parsed_json['result']:
@@ -546,21 +543,20 @@ def _xem_get_ids(infosrc_name, xem_origin):
     url = 'https://thexem.info/map/havemap?origin=%s' % xem_origin
 
     task = 'Fetching show ids with%s xem scene mapping%s for origin'
-    logger.log(u'%s %s' % (task % ('', 's'), infosrc_name))
+    logger.log(f'{task % ("", "s")} {infosrc_name}')
     parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
     if not isinstance(parsed_json, dict) or not parsed_json:
-        logger.log(u'Failed %s %s, Unable to get URL: %s'
-                   % (task.lower() % ('', 's'), infosrc_name, url), logger.ERROR)
+        logger.error(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
+                     f' Unable to get URL: {url}')
     else:
         if 'success' == parsed_json.get('result', '') and 'data' in parsed_json:
             xem_ids = list(set(filter(lambda prodid: 0 < prodid,
                                       map(lambda pid: helpers.try_int(pid), parsed_json['data']))))
             if 0 == len(xem_ids):
-                logger.log(u'Failed %s %s, no data items parsed from URL: %s'
-                           % (task.lower() % ('', 's'), infosrc_name, url), logger.WARNING)
+                logger.warning(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
+                               f' no data items parsed from URL: {url}')
 
-    logger.log(u'Finished %s %s' % (task.lower() % (' %s' % len(xem_ids), helpers.maybe_plural(xem_ids)),
-                                    infosrc_name))
+    logger.log(f'Finished {task.lower() % (f" {len(xem_ids)}", helpers.maybe_plural(xem_ids))} {infosrc_name}')
     return xem_ids
diff --git a/sickgear/scene_numbering.py b/sickgear/scene_numbering.py
index cccb4abc..885fc527 100644
--- a/sickgear/scene_numbering.py
+++ b/sickgear/scene_numbering.py
@@ -799,7 +799,7 @@ def xem_refresh(tvid, prodid, force=False):
             refresh = True
 
     if refresh or force:
-        logger.log(u'Looking up XEM scene mapping for show %s on %s' % (prodid, tvinfo.name), logger.DEBUG)
+        logger.debug(f'Looking up XEM scene mapping for show {prodid} on {tvinfo.name}')
 
         # mark refreshed
         my_db.upsert('xem_refresh',
@@ -809,7 +809,7 @@ def xem_refresh(tvid, prodid, force=False):
         try:
             parsed_json = sickgear.helpers.get_url(url, parse_json=True, timeout=90)
             if not parsed_json or '' == parsed_json:
-                logger.log(u'No XEM data for show %s on %s' % (prodid, tvinfo.name), logger.MESSAGE)
+                logger.log(f'No XEM data for show {prodid} on {tvinfo.name}', logger.MESSAGE)
                 return
 
             if 'success' in parsed_json['result']:
@@ -828,11 +828,10 @@ def xem_refresh(tvid, prodid, force=False):
                     my_db = db.DBConnection()
                     my_db.mass_action(cl)
             else:
-                logger.log(u'Empty lookup result - no XEM data for show %s on %s' % (prodid, tvinfo.name), logger.DEBUG)
+                logger.debug(f'Empty lookup result - no XEM data for show {prodid} on {tvinfo.name}')
         except (BaseException, Exception) as e:
-            logger.log(u'Exception refreshing XEM data for show ' + str(prodid) + ' on ' + tvinfo.name + ': ' + ex(e),
-                       logger.WARNING)
-            logger.log(traceback.format_exc(), logger.ERROR)
+            logger.warning(f'Exception refreshing XEM data for show {str(prodid)} on {tvinfo.name}: {ex(e)}')
+            logger.error(traceback.format_exc())
 
 
 def fix_xem_numbering(tvid, prodid):
@@ -866,9 +865,7 @@ def fix_xem_numbering(tvid, prodid):
     update_scene_episode = False
     update_scene_absolute_number = False
 
-    logger.log(
-        u'Fixing any XEM scene mapping issues for show %s on %s' % (prodid, sickgear.TVInfoAPI(tvid).name),
-        logger.DEBUG)
+    logger.debug(f'Fixing any XEM scene mapping issues for show {prodid} on {sickgear.TVInfoAPI(tvid).name}')
 
     cl = []
     for cur_row in sql_result:
@@ -1001,15 +998,15 @@ def set_scene_numbering_helper(tvid, prodid, for_season=None, for_episode=None,
     if not show_obj.is_anime:
         scene_season = None if scene_season in [None, 'null', ''] else int(scene_season)
         scene_episode = None if scene_episode in [None, 'null', ''] else int(scene_episode)
-        action_log = u'Set episode scene numbering to %sx%s for episode %sx%s of "%s"' \
-                     % (scene_season, scene_episode, for_season, for_episode, show_obj.unique_name)
+        action_log = f'Set episode scene numbering to {scene_season}x{scene_episode}' \
+                     f' for episode {for_season}x{for_episode} of "{show_obj.unique_name}"'
         scene_args.update({'scene_season': scene_season, 'scene_episode': scene_episode})
         result = {'forSeason': for_season, 'forEpisode': for_episode, 'sceneSeason': None, 'sceneEpisode': None}
     else:
         for_absolute = None if for_absolute in [None, 'null', ''] else int(for_absolute)
         scene_absolute = None if scene_absolute in [None, 'null', ''] else int(scene_absolute)
-        action_log = u'Set absolute scene numbering to %s for episode %sx%s of "%s"' \
-                     % (scene_absolute, for_season, for_episode, show_obj.unique_name)
+        action_log = f'Set absolute scene numbering to {scene_absolute}' \
+                     f' for episode {for_season}x{for_episode} of "{show_obj.unique_name}"'
         ep_args.update({'absolute': for_absolute})
         scene_args.update({'absolute_number': for_absolute, 'scene_absolute': scene_absolute, 'anime': True})
         result = {'forAbsolute': for_absolute, 'sceneAbsolute': None}
@@ -1023,7 +1020,7 @@ def set_scene_numbering_helper(tvid, prodid, for_season=None, for_episode=None,
     result['success'] = None is not ep_obj and not isinstance(ep_obj, str)
     if result['success']:
-        logger.log(action_log, logger.DEBUG)
+        logger.debug(action_log)
         set_scene_numbering(**scene_args)
         show_obj.flush_episodes()
     if not show_obj.is_anime:
diff --git a/sickgear/scheduler.py b/sickgear/scheduler.py
index 990df34c..88bc2976 100644
--- a/sickgear/scheduler.py
+++ b/sickgear/scheduler.py
@@ -110,7 +110,7 @@ class Scheduler(threading.Thread):
 
                 if should_run and ((self.prevent_cycle_run is not None and self.prevent_cycle_run()) or
                                    getattr(self.action, 'prevent_run', False)):
-                    logger.log(u'%s skipping this cycle_time' % self.name, logger.WARNING)
+                    logger.warning(f'{self.name} skipping this cycle_time')
                     # set last_run to only check start_time after another cycle_time
                     self.last_run = current_time
                     should_run = False
@@ -120,12 +120,12 @@ class Scheduler(threading.Thread):
 
                 try:
                     if not self.silent:
-                        logger.log(u"Starting new thread: " + self.name, logger.DEBUG)
+                        logger.debug(f'Starting new thread: {self.name}')
 
                     self.action.run()
                 except (BaseException, Exception) as e:
-                    logger.log(u"Exception generated in thread " + self.name + ": " + ex(e), logger.ERROR)
-                    logger.log(repr(traceback.format_exc()), logger.ERROR)
+                    logger.error(f'Exception generated in thread {self.name}: {ex(e)}')
+                    logger.error(repr(traceback.format_exc()))
                 finally:
                     if self.force:
diff --git a/sickgear/search.py b/sickgear/search.py
index d7c87fc1..67f3b22f 100644
--- a/sickgear/search.py
+++ b/sickgear/search.py
@@ -52,7 +52,7 @@ def _download_result(result):
 
     res_provider = result.provider
     if None is res_provider:
-        logger.log(u'Invalid provider name - this is a coding error, report it please', logger.ERROR)
+        logger.error('Invalid provider name - this is a coding error, report it please')
         return False
 
     # NZB files with a URL can just be downloaded from the provider
@@ -62,9 +62,9 @@ def _download_result(result):
     elif 'nzbdata' == result.resultType:
 
         # get the final file path to the nzb
-        file_name = os.path.join(sickgear.NZB_DIR, u'%s.nzb' % result.name)
+        file_name = os.path.join(sickgear.NZB_DIR, f'{result.name}.nzb')
 
-        logger.log(u'Saving NZB to %s' % file_name)
+        logger.log(f'Saving NZB to {file_name}')
 
         new_result = True
 
@@ -77,12 +77,12 @@ def _download_result(result):
             write_file(file_name, data, raise_exceptions=True)
 
         except (EnvironmentError, IOError) as e:
-            logger.log(u'Error trying to save NZB to black hole: %s' % ex(e), logger.ERROR)
+            logger.error(f'Error trying to save NZB to black hole: {ex(e)}')
             new_result = False
     elif 'torrent' == res_provider.providerType:
         new_result = res_provider.download_result(result)
     else:
-        logger.log(u'Invalid provider type - this is a coding error, report it please', logger.ERROR)
+        logger.error('Invalid provider type - this is a coding error, report it please')
         new_result = False
 
     return new_result
@@ -120,7 +120,7 @@ def snatch_episode(result, end_status=SNATCHED):
         elif 'nzbget' == sickgear.NZB_METHOD:
             dl_result = nzbget.send_nzb(result)
         else:
-            logger.log(u'Unknown NZB action specified in config: %s' % sickgear.NZB_METHOD, logger.ERROR)
+            logger.error(f'Unknown NZB action specified in config: {sickgear.NZB_METHOD}')
             dl_result = False
 
     # TORRENT files can be sent to clients or saved to disk
@@ -138,7 +138,7 @@ def snatch_episode(result, end_status=SNATCHED):
             if not result.content and not result.url.startswith('magnet'):
                 result.content = result.provider.get_url(result.url, as_binary=True)
                 if result.provider.should_skip() or not result.content:
-                    logger.log(u'Torrent content failed to download from %s' % result.url, logger.ERROR)
+                    logger.error(f'Torrent content failed to download from {result.url}')
                     return False
             # Snatches torrent with client
             dl_result = clients.get_client_instance(sickgear.TORRENT_METHOD)().send_torrent(result)
@@ -146,7 +146,7 @@ def snatch_episode(result, end_status=SNATCHED):
             if result.cache_filepath:
                 helpers.remove_file_perm(result.cache_filepath)
     else:
-        logger.log(u'Unknown result type, unable to download it', logger.ERROR)
+        logger.error('Unknown result type, unable to download it')
         dl_result = False
 
     if not dl_result:
@@ -155,7 +155,7 @@ def snatch_episode(result, end_status=SNATCHED):
     if sickgear.USE_FAILED_DOWNLOADS:
         failed_history.add_snatched(result)
 
-    ui.notifications.message(u'Episode snatched', result.name)
+    ui.notifications.message('Episode snatched', result.name)
 
     history.log_snatch(result)
 
@@ -198,13 +198,13 @@ def pass_show_wordlist_checks(name, show_obj):
     result = show_name_helpers.contains_any(name, show_obj.rls_ignore_words, rx=show_obj.rls_ignore_words_regex,
                                             **re_extras)
     if None is not result and result:
-        logger.log(u'Ignored: %s for containing ignore word' % name)
+        logger.log(f'Ignored: {name} for containing ignore word')
         return False
 
     result = show_name_helpers.contains_any(name, show_obj.rls_require_words, rx=show_obj.rls_require_words_regex,
                                             **re_extras)
     if None is not result and not result:
-        logger.log(u'Ignored: %s for not containing any required word match' % name)
+        logger.log(f'Ignored: {name} for not containing any required word match')
         return False
     return True
@@ -225,8 +225,8 @@ def pick_best_result(
     :param filter_rls: optional thread name
     :return: best search result
     """
-    msg = (u'Picking the best result out of %s', u'Checking the best result %s')[1 == len(results)]
-    logger.log(msg % [x.name for x in results], logger.DEBUG)
+    msg = ('Picking the best result out of %s', 'Checking the best result %s')[1 == len(results)]
+    logger.debug(msg % [x.name for x in results])
 
     # find the best result for the current episode
     best_result = None
@@ -252,8 +252,8 @@ def pick_best_result(
             continue
 
         if quality_list and cur_result.quality not in quality_list:
-            logger.log(u'Rejecting unwanted quality %s for [%s]' % (
-                Quality.qualityStrings[cur_result.quality], cur_result.name), logger.DEBUG)
+            logger.debug(f'Rejecting unwanted quality {Quality.qualityStrings[cur_result.quality]}'
+                         f' for [{cur_result.name}]')
             continue
 
         if not pass_show_wordlist_checks(cur_result.name, show_obj):
@@ -262,12 +262,12 @@ def pick_best_result(
         cur_size = getattr(cur_result, 'size', None)
         if sickgear.USE_FAILED_DOWNLOADS and None is not cur_size and failed_history.has_failed(
                 cur_result.name, cur_size, cur_result.provider.name):
-            logger.log(u'Rejecting previously failed [%s]' % cur_result.name)
+            logger.log(f'Rejecting previously failed [{cur_result.name}]')
             continue
 
         if filter_rls and any([scene_only, non_scene_fallback, scene_rej_nuked, scene_nuked_active]):
             if show_obj.is_anime:
-                addendum = u'anime (skipping scene/nuke filter) '
+                addendum = 'anime (skipping scene/nuke filter) '
             else:
                 scene_contains = False
                 if scene_only and scene_or_contain:
@@ -277,25 +277,23 @@ def pick_best_result(
                         scene_contains = True
 
                 if scene_contains and not scene_rej_nuked:
-                    logger.log(u'Considering title match to \'or contain\' [%s]' % cur_result.name, logger.DEBUG)
+                    logger.debug(f'Considering title match to \'or contain\' [{cur_result.name}]')
                     reject = False
                 else:
                     reject, url = can_reject(cur_result.name)
                     if reject:
                         if isinstance(reject, string_types):
                             if scene_rej_nuked and not scene_nuked_active:
-                                logger.log(u'Rejecting nuked release. Nuke reason [%s] source [%s]' % (reject, url),
-                                           logger.DEBUG)
+                                logger.debug(f'Rejecting nuked release. Nuke reason [{reject}] source [{url}]')
                             elif scene_nuked_active:
                                 best_fallback_result = best_candidate(best_fallback_result, cur_result)
                             else:
-                                logger.log(u'Considering nuked release. Nuke reason [%s] source [%s]' % (reject, url),
-                                           logger.DEBUG)
+                                logger.debug(f'Considering nuked release. Nuke reason [{reject}] source [{url}]')
                                 reject = False
                         elif scene_contains or non_scene_fallback:
                             best_fallback_result = best_candidate(best_fallback_result, cur_result)
                         else:
-                            logger.log(u'Rejecting as not scene release listed at any [%s]' % url, logger.DEBUG)
+                            logger.debug(f'Rejecting as not scene release listed at any [{url}]')
 
                 if reject:
                     continue
@@ -303,16 +301,16 @@ def pick_best_result(
         best_result = best_candidate(best_result, cur_result)
 
     if best_result and scene_only and not show_obj.is_anime:
-        addendum = u'scene release filtered '
+        addendum = 'scene release filtered '
     elif not best_result and best_fallback_result:
-        addendum = u'non scene release filtered '
+        addendum = 'non scene release filtered '
         best_result = best_fallback_result
 
     if best_result:
-        msg = (u'Picked as the best %s[%s]', u'Confirmed as the best %s[%s]')[1 == len(results)]
-        logger.log(msg % (addendum, best_result.name), logger.DEBUG)
+        msg = ('Picked as the best %s[%s]', 'Confirmed as the best %s[%s]')[1 == len(results)]
+        logger.debug(msg % (addendum, best_result.name))
     else:
-        logger.log(u'No result picked.', logger.DEBUG)
+        logger.debug('No result picked.')
 
     return best_result
@@ -326,7 +324,7 @@ def best_candidate(best_result, cur_result):
     :param cur_result: current best search result
     :return: new best search result
     """
-    logger.log(u'Quality is %s for [%s]' % (Quality.qualityStrings[cur_result.quality], cur_result.name))
+    logger.log(f'Quality is {Quality.qualityStrings[cur_result.quality]} for [{cur_result.name}]')
 
     if not best_result or best_result.quality < cur_result.quality != Quality.UNKNOWN:
         best_result = cur_result
@@ -337,10 +335,10 @@ def best_candidate(best_result, cur_result):
             best_result = cur_result
         elif cur_result.properlevel == best_result.properlevel:
             if 'xvid' in best_result.name.lower() and 'x264' in cur_result.name.lower():
-                logger.log(u'Preferring (x264 over xvid) [%s]' % cur_result.name)
+                logger.log(f'Preferring (x264 over xvid) [{cur_result.name}]')
                 best_result = cur_result
             elif re.search('(?i)(h.?|x)264', best_result.name) and re.search('(?i)((h.?|x)265|hevc)', cur_result.name):
-                logger.log(u'Preferring (x265 over x264) [%s]' % cur_result.name)
+                logger.log(f'Preferring (x265 over x264) [{cur_result.name}]')
                 best_result = cur_result
             elif 'internal' in best_result.name.lower() and 'internal' not in cur_result.name.lower():
                 best_result = cur_result
@@ -358,7 +356,7 @@ def is_final_result(result):
     returns True, if not then it's False
     """
 
-    logger.log(u'Checking if searching should continue after finding %s' % result.name, logger.DEBUG)
+    logger.debug(f'Checking if searching should continue after finding {result.name}')
 
     show_obj = result.ep_obj_list[0].show_obj
 
@@ -399,8 +397,7 @@ def is_first_best_match(ep_status, result):
     :return:
     """
 
-    logger.log(u'Checking if the first best quality match should be archived for episode %s' %
-               result.name, logger.DEBUG)
+    logger.debug(f'Checking if the first best quality match should be archived for episode {result.name}')
 
     show_obj = result.ep_obj_list[0].show_obj
     cur_status, cur_quality = Quality.split_composite_status(ep_status)
@@ -570,7 +567,7 @@ def wanted_episodes(show_obj,  # type: TVShow
                         ['%d unaired episode%s', total_unaired]:
         if 0 < total:
             actions.append(msg % (total, helpers.maybe_plural(total)))
-    logger.log(u'We want %s for %s' % (' and '.join(actions), show_obj.unique_name))
+    logger.log(f'We want {" and ".join(actions)} for {show_obj.unique_name}')
 
     return wanted
@@ -602,8 +599,8 @@ def search_for_needed_episodes(ep_obj_list):
 
         for cur_ep_obj in ep_obj_search_result_list:
 
            if cur_ep_obj.show_obj.paused:
-                logger.debug(u'Show %s is paused, ignoring all RSS items for %s' %
-                             (cur_ep_obj.show_obj.unique_name, cur_ep_obj.pretty_name()))
+                logger.debug(f'Show {cur_ep_obj.show_obj.unique_name} is paused,'
+                             f' ignoring all RSS items for {cur_ep_obj.pretty_name()}')
                 continue
 
             # find the best result for the current episode
@@ -612,7 +609,7 @@ def search_for_needed_episodes(ep_obj_list):
 
             # if all results were rejected move on to the next episode
             if not best_result:
-                logger.log(u'All found results for %s were rejected.' % cur_ep_obj.pretty_name(), logger.DEBUG)
+                logger.debug(f'All found results for {cur_ep_obj.pretty_name()} were rejected.')
                 continue
 
             # if it's already in the list (from another provider) and the newly found quality is no better, then skip it
@@ -639,11 +636,10 @@ def search_for_needed_episodes(ep_obj_list):
         threading.current_thread().name = orig_thread_name
 
     if not len(providers):
-        logger.log('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes',
-                   logger.WARNING)
+        logger.warning('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes')
     elif not search_done:
-        logger.log('Failed recent search of %s enabled provider%s. More info in debug log.' % (
-            len(providers), helpers.maybe_plural(providers)), logger.ERROR)
+        logger.error(f'Failed recent search of {len(providers)} enabled provider{helpers.maybe_plural(providers)}.'
+                     f' More info in debug log.')
 
     return list(found_results.values())
@@ -713,9 +709,9 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m
         search_count += 1
 
         if 'eponly' == search_mode:
-            logger.log(u'Performing episode search for %s' % show_obj.unique_name)
+            logger.log(f'Performing episode search for {show_obj.unique_name}')
        else:
-            logger.log(u'Performing season pack search for %s' % show_obj.unique_name)
+            logger.log(f'Performing season pack search for {show_obj.unique_name}')
 
         try:
             provider.cache.clear_cache()
@@ -726,10 +722,10 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m
                     len(v), (('multiep', 'season')[SEASON_RESULT == k], 'episode')['ep' in search_mode],
                     helpers.maybe_plural(v)) for (k, v) in iteritems(search_result_list)]))
         except exceptions_helper.AuthException as e:
-            logger.error(u'Authentication error: %s' % ex(e))
+            logger.error(f'Authentication error: {ex(e)}')
             break
        except (BaseException, Exception) as e:
-            logger.error(u'Error while searching %s, skipping: %s' % (provider.name, ex(e)))
+            logger.error(f'Error while searching {provider.name}, skipping: {ex(e)}')
             logger.error(traceback.format_exc())
             break
@@ -752,7 +748,7 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m
                 break
 
         search_mode = '%sonly' % ('ep', 'sp')['ep' in search_mode]
-        logger.log(u'Falling back to %s search ...' % ('season pack', 'episode')['ep' in search_mode])
+        logger.log(f'Falling back to {("season pack", "episode")["ep" in search_mode]} search ...')
 
     if not provider_results:
         logger.log('No suitable result at [%s]' % provider.name)
@@ -804,7 +800,7 @@ def cache_torrent_file(
     torrent_name = search_result.provider.regulate_cache_torrent_file(torrent_name)
     if not pick_best_result([search_result], show_obj, **kwargs) or \
             not show_name_helpers.pass_wordlist_checks(torrent_name, indexer_lookup=False, show_obj=show_obj):
-        logger.log(u'Ignored %s that contains %s (debug log has detail)' % (result_name, torrent_name))
+        logger.log(f'Ignored {result_name} that contains {torrent_name} (debug log has detail)')
         return
 
     return search_result
@@ -848,7 +844,7 @@ def search_providers(
     # create a thread for each provider to search
     for cur_provider in provider_list:
         if cur_provider.anime_only and not show_obj.is_anime:
-            logger.debug(u'%s is not an anime, skipping' % show_obj.unique_name)
+            logger.debug(f'{show_obj.unique_name} is not an anime, skipping')
             continue
 
         provider_id = cur_provider.get_id()
@@ -891,14 +887,14 @@ def search_providers(
             for cur_result in found_results[provider_id][cur_episode]:
                 if Quality.UNKNOWN != cur_result.quality and highest_quality_overall < cur_result.quality:
                     highest_quality_overall = cur_result.quality
-        logger.debug(u'%s is the highest quality of any match' % Quality.qualityStrings[highest_quality_overall])
+        logger.debug(f'{Quality.qualityStrings[highest_quality_overall]} is the highest quality of any match')
 
         # see if every episode is wanted
         if best_season_result:
             # get the quality of the season nzb
             season_qual = best_season_result.quality
-            logger.log(u'%s is the quality of the season %s' % (Quality.qualityStrings[season_qual],
-                                                                best_season_result.provider.providerType), logger.DEBUG)
+            logger.debug(f'{Quality.qualityStrings[season_qual]} is the quality of the season'
+                         f' {best_season_result.provider.providerType}')
 
             my_db = db.DBConnection()
             sql = 'SELECT season, episode' \
@@ -907,8 +903,8 @@ def search_providers(
                   (show_obj.tvid, show_obj.prodid, ','.join([str(x.season) for x in ep_obj_list]))
             ep_nums = [(int(x['season']), int(x['episode'])) for x in my_db.select(sql)]
 
-            logger.log(u'Executed query: [%s]' % sql)
-            logger.log(u'Episode list: %s' % ep_nums, logger.DEBUG)
+            logger.log(f'Executed query: [{sql}]')
+            logger.debug(f'Episode list: {ep_nums}')
 
             all_wanted = True
             any_wanted = False
@@ -921,8 +917,8 @@ def search_providers(
             # if we need every ep in the season and there's nothing better,
             # then download this and be done with it (unless single episodes are preferred)
             if all_wanted and highest_quality_overall == best_season_result.quality:
-                logger.log(u'Every episode in this season is needed, downloading the whole %s %s' %
-                           (best_season_result.provider.providerType, best_season_result.name))
+                logger.log(f'Every episode in this season is needed, downloading the whole'
+                           f' {best_season_result.provider.providerType} {best_season_result.name}')
                 ep_obj_list = []
                 for ep_num in ep_nums:
                     ep_obj_list.append(show_obj.get_episode(ep_num[0], ep_num[1]))
@@ -931,11 +927,11 @@ def search_providers(
 
                 return [best_season_result]
 
            elif not any_wanted:
-                logger.log(u'No episodes from this season are wanted at this quality, ignoring the result of ' +
-                           best_season_result.name, logger.DEBUG)
+                logger.debug(f'No episodes from this season are wanted at this quality,'
+                             f' ignoring the result of {best_season_result.name}')
 
            else:
                 if GenericProvider.NZB == best_season_result.provider.providerType:
-                    logger.log(u'Breaking apart the NZB and adding the individual ones to our results', logger.DEBUG)
+                    logger.debug('Breaking apart the NZB and adding the individual ones to our results')
 
                     # if not, break it apart and add them as the lowest priority results
                     individual_results = nzbSplitter.split_result(best_season_result)
@@ -959,8 +955,8 @@ def search_providers(
                else:
                     # Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it
-                    logger.log(u'Adding multi episode result for full season torrent. In your torrent client, set ' +
-                               u'the episodes that you do not want to "don\'t download"')
+                    logger.log('Adding multi episode result for full season torrent. In your torrent client,'
+                               ' set the episodes that you do not want to "don\'t download"')
                     ep_obj_list = []
                     for ep_num in ep_nums:
                         ep_obj_list.append(show_obj.get_episode(ep_num[0], ep_num[1]))
@@ -982,11 +978,11 @@ def search_providers(
         if MULTI_EP_RESULT in found_results[provider_id]:
             for multi_result in found_results[provider_id][MULTI_EP_RESULT]:
 
-                logger.log(u'Checking usefulness of multi episode result [%s]' % multi_result.name, logger.DEBUG)
+                logger.debug(f'Checking usefulness of multi episode result [{multi_result.name}]')
 
                 if sickgear.USE_FAILED_DOWNLOADS and failed_history.has_failed(multi_result.name, multi_result.size,
                                                                                multi_result.provider.name):
-                    logger.log(u'Rejecting previously failed multi episode result [%s]' % multi_result.name)
+                    logger.log(f'Rejecting previously failed multi episode result [{multi_result.name}]')
                     continue
 
                 # see how many of the eps that this result covers aren't covered by single results
@@ -1000,12 +996,12 @@ def search_providers(
                    else:
                         not_needed_eps.append(ep_num)
 
-                logger.log(u'Single episode check result is... needed episodes: %s, not needed episodes: %s' %
-                           (needed_eps, not_needed_eps), logger.DEBUG)
+                logger.debug(f'Single episode check result is... needed episodes: {needed_eps},'
+                             f' not needed episodes: {not_needed_eps}')
 
                 if not not_needed_eps:
-                    logger.log(u'All of these episodes were covered by single episode results, ' +
-                               'ignoring this multi episode result', logger.DEBUG)
+                    logger.debug('All of these episodes were covered by single episode results,'
+                                 ' ignoring this multi episode result')
                     continue
 
                 # check if these eps are already covered by another multi-result
@@ -1018,13 +1014,13 @@ def search_providers(
                    else:
                         multi_needed_eps.append(ep_num)
 
-                logger.log(u'Multi episode check result is... multi needed episodes: ' +
-                           '%s, multi not needed episodes: %s' % (multi_needed_eps, multi_not_needed_eps), logger.DEBUG)
+                logger.debug(f'Multi episode check result is...'
+                             f' multi needed episodes: {multi_needed_eps},'
+                             f' multi not needed episodes: {multi_not_needed_eps}')
 
                 if not multi_needed_eps:
-                    logger.log(u'All of these episodes were covered by another multi episode nzb, ' +
-                               'ignoring this multi episode result',
-                               logger.DEBUG)
+                    logger.debug('All of these episodes were covered by another multi episode nzb,'
+                                 ' ignoring this multi episode result')
                     continue
 
                 # if we're keeping this multi-result then remember it
@@ -1035,8 +1031,8 @@ def search_providers(
                 for ep_obj in multi_result.ep_obj_list:
                     ep_num = ep_obj.episode
                     if ep_num in found_results[provider_id]:
-                        logger.log(u'A needed multi episode result overlaps with a single episode result for episode ' +
-                                   '#%s, removing the single episode results from the list' % ep_num, logger.DEBUG)
+                        logger.debug(f'A needed multi episode result overlaps with a single episode result'
+                                     f' for episode #{ep_num}, removing the single episode results from the list')
                         del found_results[provider_id][ep_num]
 
     # of all the single ep results narrow it down to the best one for each episode
@@ -1119,8 +1115,8 @@ def search_providers(
     if not len(provider_list):
         logger.warning('No NZB/Torrent providers in Media Providers/Options are allowed for active searching')
    elif not search_done:
-        logger.log('Failed active search of %s enabled provider%s. More info in debug log.' % (
-            len(provider_list), helpers.maybe_plural(provider_list)), logger.ERROR)
+        logger.error(f'Failed active search of {len(provider_list)}'
+                     f' enabled provider{helpers.maybe_plural(provider_list)}. More info in debug log.')
    elif not any(final_results):
         logger.log('No suitable candidates')
diff --git a/sickgear/search_backlog.py b/sickgear/search_backlog.py
index fa603986..e2936f52 100644
--- a/sickgear/search_backlog.py
+++ b/sickgear/search_backlog.py
@@ -105,7 +105,7 @@ class BacklogSearcher(object):
 
     def am_running(self):
         # type: (...) -> bool
-        logger.log(u'amWaiting: ' + str(self.amWaiting) + ', amActive: ' + str(self.amActive), logger.DEBUG)
+        logger.debug(f'amWaiting: {self.amWaiting}, amActive: {self.amActive}')
         return (not self.amWaiting) and self.amActive
 
     def add_backlog_item(self,
@@ -197,7 +197,7 @@ class BacklogSearcher(object):
         :rtype: None
         """
         if self.amActive and not which_shows:
-            logger.log(u'Backlog is still running, not starting it again', logger.DEBUG)
+            logger.debug('Backlog is still running, not starting it again')
             return
 
         if which_shows:
@@ -216,12 +216,12 @@ class BacklogSearcher(object):
                            and GenericProvider.TORRENT == x.providerType,
                            sickgear.providers.sorted_sources()))
 
            if not any_torrent_enabled:
-                logger.log('Last scheduled backlog run was within the last day, skipping this run.', logger.DEBUG)
+                logger.debug('Last scheduled backlog run was within the last day, skipping this run.')
                 return
 
         if not self.providers_active(any_torrent_enabled, standard_backlog):
-            logger.log('No NZB/Torrent provider has active searching enabled in config/Media Providers,'
-                       ' cannot start backlog.', logger.WARNING)
+            logger.warning('No NZB/Torrent provider has active searching enabled in config/Media Providers,'
+                           ' cannot start backlog.')
             return
 
         self._get_last_backlog()
@@ -234,14 +234,14 @@ class BacklogSearcher(object):
 
         limited_backlog = False
         if standard_backlog and (any_torrent_enabled or sickgear.BACKLOG_NOFULL):
-            logger.log(u'Running limited backlog for episodes missed during the last %s day(s)' %
-                       str(sickgear.BACKLOG_LIMITED_PERIOD))
+            logger.log(f'Running limited backlog for episodes missed during the last'
+                       f' {sickgear.BACKLOG_LIMITED_PERIOD} day(s)')
             from_date = limited_from_date
             limited_backlog = True
 
         runparts = []
         if standard_backlog and not any_torrent_enabled and sickgear.BACKLOG_NOFULL:
-            logger.log(u'Skipping automated full backlog search because it is disabled in search settings')
+            logger.log('Skipping automated full backlog search because it is disabled in search settings')
 
         my_db = db.DBConnection('cache.db')
         if standard_backlog and not any_torrent_enabled and not sickgear.BACKLOG_NOFULL:
@@ -333,7 +333,7 @@ class BacklogSearcher(object):
 
     @staticmethod
     def _get_last_runtime():
-        logger.log('Retrieving the last runtime of Backlog from the DB', logger.DEBUG)
+        logger.debug('Retrieving the last runtime of Backlog from the DB')
 
         my_db = db.DBConnection()
         sql_result = my_db.select('SELECT * FROM info')
@@ -350,7 +350,7 @@ class BacklogSearcher(object):
         return last_run_time
 
     def _set_last_runtime(self, when):
-        logger.log('Setting the last backlog runtime in the DB to %s' % when, logger.DEBUG)
+        logger.debug('Setting the last backlog runtime in the DB to %s' % when)
 
         my_db = db.DBConnection()
         sql_result = my_db.select('SELECT * FROM info')
@@ -369,7 +369,7 @@ class BacklogSearcher(object):
 
     def _get_last_backlog(self):
 
-        logger.log('Retrieving the last check time from the DB', logger.DEBUG)
+        logger.debug('Retrieving the last check time from the DB')
 
         my_db = db.DBConnection()
         sql_result = my_db.select('SELECT * FROM info')
@@ -389,7 +389,7 @@ class BacklogSearcher(object):
 
     @staticmethod
     def _set_last_backlog(when):
-        logger.log('Setting the last backlog in the DB to %s' % when, logger.DEBUG)
+        logger.debug('Setting the last backlog in the DB to %s' % when)
 
         my_db = db.DBConnection()
         sql_result = my_db.select('SELECT * FROM info')
diff --git a/sickgear/search_queue.py b/sickgear/search_queue.py
index 88d430d2..200910b9 100644
--- a/sickgear/search_queue.py
+++ b/sickgear/search_queue.py
@@ -83,7 +83,7 @@ class SearchQueue(generic_queue.GenericQueue):
                     continue
                 self.add_item(item, add_to_db=False)
         except (BaseException, Exception) as e:
-            logger.log('Exception loading queue %s: %s' % (self.__class__.__name__, ex(e)), logger.ERROR)
+            logger.error('Exception loading queue %s: %s' % (self.__class__.__name__, ex(e)))
 
     def _clear_sql(self):
         return [
@@ -322,7 +322,7 @@ class SearchQueue(generic_queue.GenericQueue):
             # manual and failed searches
             generic_queue.GenericQueue.add_item(self, item, add_to_db=add_to_db)
        else:
-            logger.log(u'Not adding item, it\'s already in the queue', logger.DEBUG)
+            logger.debug("Not adding item, it's already in the queue")
 
 
 class RecentSearchQueueItem(generic_queue.QueueItem):
@@ -367,24 +367,23 @@ class RecentSearchQueueItem(generic_queue.QueueItem):
             self._check_for_propers(needed)
 
             if not self.ep_obj_list:
-                logger.log(u'No search of cache for episodes required')
+                logger.log('No search of cache for episodes required')
                 self.success = True
            else:
                 num_shows = len(set([ep_obj.show_obj.name for ep_obj in self.ep_obj_list]))
-                logger.log(u'Found %d needed episode%s spanning %d show%s'
-                           % (len(self.ep_obj_list), helpers.maybe_plural(self.ep_obj_list),
-                              num_shows, helpers.maybe_plural(num_shows)))
+                logger.log(f'Found {len(self.ep_obj_list):d} needed episode{helpers.maybe_plural(self.ep_obj_list)}'
+                           f' spanning {num_shows:d} show{helpers.maybe_plural(num_shows)}')
 
                 try:
-                    logger.log(u'Beginning recent search for episodes')
+                    logger.log('Beginning recent search for episodes')
                     # noinspection PyTypeChecker
                     search_results = search.search_for_needed_episodes(self.ep_obj_list)
 
                     if not len(search_results):
-                        logger.log(u'No needed episodes found')
+                        logger.log('No needed episodes found')
                    else:
                         for result in search_results:
-                            logger.log(u'Downloading %s from %s' % (result.name, result.provider.name))
+                            logger.log(f'Downloading {result.name} from {result.provider.name}')
                             self.success = search.snatch_episode(result)
                             if self.success:
                                 for ep_obj in result.ep_obj_list:
@@ -399,7 +398,7 @@ class RecentSearchQueueItem(generic_queue.QueueItem):
                     helpers.cpu_sleep()
 
                except (BaseException, Exception):
-                    logger.log(traceback.format_exc(), logger.ERROR)
+                    logger.error(traceback.format_exc())
 
                 if None is self.success:
                     self.success = False
@@ -497,13 +496,13 @@ class RecentSearchQueueItem(generic_queue.QueueItem):
                 wanted |= (False, True)[common.WANTED == ep_obj.status]
 
         if not wanted:
-            logger.log(u'No unaired episodes marked wanted')
+            logger.log('No unaired episodes marked wanted')
 
         if 0 < len(sql_l):
             my_db = db.DBConnection()
             my_db.mass_action(sql_l)
 
         if wanted:
-            logger.log(u'Found new episodes marked wanted')
+            logger.log('Found new episodes marked wanted')
 
     @staticmethod
     def update_providers(needed=common.NeededQualities(need_all=True)):
@@ -533,8 +532,7 @@ class RecentSearchQueueItem(generic_queue.QueueItem):
             threads[-1].start()
 
         if not len(providers):
-            logger.log('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes',
-                       logger.WARNING)
+            logger.warning('No NZB/Torrent providers in Media Providers/Options are enabled to match recent episodes')
 
         if threads:
             # wait for all threads to finish
@@ -637,7 +635,7 @@ class ManualSearchQueueItem(BaseSearchQueueItem):
             generic_queue.QueueItem.run(self)
 
         try:
-            logger.log(u'Beginning manual search for: [%s]' % self.segment.pretty_name())
+            logger.log(f'Beginning manual search for: [{self.segment.pretty_name()}]')
             self.started = True
 
             ep_count, ep_count_scene = get_aired_in_season(self.show_obj)
@@ -656,7 +654,7 @@ class ManualSearchQueueItem(BaseSearchQueueItem):
 
             if search_result:
                 for result in search_result:  # type: sickgear.classes.NZBSearchResult
-                    logger.log(u'Downloading %s from %s' % (result.name, result.provider.name))
+                    logger.log(f'Downloading {result.name} from {result.provider.name}')
(result.name, result.provider.name)) + logger.log(f'Downloading {result.name} from {result.provider.name}') if search.snatch_episode(result): for ep_obj in result.ep_obj_list: # type: sickgear.tv.TVEpisode self.snatched_eps.add(SimpleNamespace(tvid_prodid=ep_obj.show_obj.tvid_prodid, @@ -813,9 +811,9 @@ class FailedQueueItem(BaseSearchQueueItem): helpers.cpu_sleep() else: pass - # logger.log(u'No valid episode found to retry for: [%s]' % self.segment.pretty_name()) + # logger.log(f'No valid episode found to retry for: [{self.segment.pretty_name()}]') except (BaseException, Exception): - logger.log(traceback.format_exc(), logger.ERROR) + logger.error(traceback.format_exc()) finally: # Keep a list with the last executed searches diff --git a/sickgear/sgdatetime.py b/sickgear/sgdatetime.py index f963c76d..1e6ffaf0 100644 --- a/sickgear/sgdatetime.py +++ b/sickgear/sgdatetime.py @@ -211,7 +211,7 @@ class SGDatetime(datetime.datetime): obj = (dt, self)[self is not None] # type: datetime.datetime try: if None is not obj: - strd = u'%s, %s' % ( + strd = '%s, %s' % ( SGDatetime.sbstrftime(obj, (sickgear.DATE_PRESET, d_preset)[None is not d_preset]), SGDatetime.sbftime(dt, show_seconds, t_preset, False, markup)) diff --git a/sickgear/show_name_helpers.py b/sickgear/show_name_helpers.py index 0ee26627..f688c1d5 100644 --- a/sickgear/show_name_helpers.py +++ b/sickgear/show_name_helpers.py @@ -62,14 +62,14 @@ def pass_wordlist_checks(name, # type: AnyStr """ if parse: - err_msg = u'Unable to parse the filename %s into a valid ' % name + err_msg = f'Unable to parse the filename {name} into a valid ' try: NameParser(indexer_lookup=indexer_lookup).parse(name) except InvalidNameException: - logger.log(err_msg + 'episode', logger.DEBUG) + logger.debug(err_msg + 'episode') return False except InvalidShowException: - logger.log(err_msg + 'show', logger.DEBUG) + logger.debug(err_msg + 'show') return False word_list = {'sub(bed|ed|pack|s)', '(dk|fin|heb|kor|nor|nordic|pl|swe)sub(bed|ed|s)?', @@ -94,7 +94,7 @@ def pass_wordlist_checks(name, # type: AnyStr result = result or contains_any(name, word_list, rx=sickgear.IGNORE_WORDS_REGEX) if None is not result and result: - logger.log(u'Ignored: %s for containing ignore word' % name, logger.DEBUG) + logger.debug(f'Ignored: {name} for containing ignore word') return False result = None @@ -108,7 +108,7 @@ def pass_wordlist_checks(name, # type: AnyStr # if any of the good strings aren't in the name then say no result = result or not_contains_any(name, req_word_list, rx=sickgear.REQUIRE_WORDS_REGEX) if None is not result and result: - logger.log(u'Ignored: %s for not containing required word match' % name, logger.DEBUG) + logger.debug(f'Ignored: {name} for not containing required word match') return False return True @@ -160,7 +160,7 @@ def contains_any(subject, # type: AnyStr if (match and not invert) or (not match and invert): msg = match and not invert and 'Found match' or '' msg = not match and invert and 'No match found' or msg - logger.log(u'%s from pattern: %s in text: %s ' % (msg, rc_filter.pattern, subject), logger.DEBUG) + logger.debug(f'{msg} from pattern: {rc_filter.pattern} in text: {subject} ') return True return False return None @@ -190,13 +190,11 @@ def compile_word_list(lookup_words, # type: Union[AnyStr, Set[AnyStr]] subject = search_raw and re.escape(word) or re.sub(r'([\" \'])', r'\\\1', word) result.append(re.compile('(?i)%s%s%s' % (re_prefix, subject, re_suffix))) except re.error as e: - logger.log(u'Failure to compile filter expression: %s ... 
Reason: %s' % (word, ex(e)), - logger.DEBUG) + logger.debug(f'Failure to compile filter expression: {word} ... Reason: {ex(e)}') diff = len(lookup_words) - len(result) if diff: - logger.log(u'From %s expressions, %s was discarded during compilation' % (len(lookup_words), diff), - logger.DEBUG) + logger.debug(f'From {len(lookup_words)} expressions, {diff} was discarded during compilation') return result @@ -430,7 +428,7 @@ def determine_release_name(dir_name=None, nzb_name=None): """ if None is not nzb_name: - logger.log(u'Using nzb name for release name.') + logger.log('Using nzb name for release name.') return nzb_name.rpartition('.')[0] if not dir_name or not os.path.isdir(dir_name): @@ -446,7 +444,7 @@ def determine_release_name(dir_name=None, nzb_name=None): if 1 == len(results): found_file = results[0].rpartition('.')[0] if pass_wordlist_checks(found_file): - logger.log(u'Release name (%s) found from file (%s)' % (found_file, results[0])) + logger.log(f'Release name ({found_file}) found from file ({results[0]})') return found_file.rpartition('.')[0] # If that fails, we try the folder @@ -455,7 +453,7 @@ def determine_release_name(dir_name=None, nzb_name=None): # NOTE: Multiple failed downloads will change the folder name. # (e.g., appending #s) # Should we handle that? - logger.log(u'Folder name (%s) appears to be a valid release name. Using it.' % folder) + logger.log(f'Folder name ({folder}) appears to be a valid release name. Using it.') return folder return None diff --git a/sickgear/show_queue.py b/sickgear/show_queue.py index 03046c93..e083bb40 100644 --- a/sickgear/show_queue.py +++ b/sickgear/show_queue.py @@ -126,7 +126,7 @@ class ShowQueue(generic_queue.GenericQueue): lang=cur_row['lang'], uid=cur_row['uid'], add_to_db=False) except (BaseException, Exception) as e: - logger.log('Exception loading queue %s: %s' % (self.__class__.__name__, ex(e)), logger.ERROR) + logger.error('Exception loading queue %s: %s' % (self.__class__.__name__, ex(e))) def save_item(self, item): # type: (ShowQueueItem) -> None @@ -223,7 +223,7 @@ class ShowQueue(generic_queue.GenericQueue): else: my_db.action('DELETE FROM tv_src_switch WHERE uid = ?', [item.uid]) except (BaseException, Exception) as e: - logger.log('Exception deleting item %s from db: %s' % (item, ex(e)), logger.ERROR) + logger.error('Exception deleting item %s from db: %s' % (item, ex(e))) else: generic_queue.GenericQueue.delete_item(self, item) @@ -544,8 +544,8 @@ class ShowQueue(generic_queue.GenericQueue): if ((not after_update and self.is_being_updated(show_obj)) or self.is_in_update_queue(show_obj)) and not force: - logger.log('Skipping this refresh as there is already an update queued or' - ' in progress and a refresh is done at the end of an update anyway.', logger.DEBUG) + logger.debug('Skipping this refresh as there is already an update queued or' + ' in progress and a refresh is done at the end of an update anyway.') return if self.is_show_being_switched(show_obj): @@ -976,22 +976,22 @@ class QueueItemAdd(ShowQueueItem): if self.lang: tvinfo_config['language'] = self.lang - logger.log(u'' + str(sickgear.TVInfoAPI(self.tvid).name) + ': ' + repr(tvinfo_config)) + logger.log(f'{sickgear.TVInfoAPI(self.tvid).name}: {repr(tvinfo_config)}') t = sickgear.TVInfoAPI(self.tvid).setup(**tvinfo_config) s = t.get_show(self.prodid, load_episodes=False, language=self.lang) if getattr(t, 'show_not_found', False): - logger.log('Show %s was not found on %s, maybe show was deleted' % - (self.show_name, sickgear.TVInfoAPI(self.tvid).name), 
logger.ERROR) + logger.error(f'Show {self.show_name} was not found on {sickgear.TVInfoAPI(self.tvid).name},' + f' maybe show was deleted') self._finish_early() return # this usually only happens if they have an NFO in their show dir # which gave us a TV info source ID that has no proper english version of the show if None is getattr(s, 'seriesname', None): - logger.log('Show in %s has no name on %s, probably the wrong language used to search with.' % - (self.showDir, sickgear.TVInfoAPI(self.tvid).name), logger.ERROR) + logger.error(f'Show in {self.showDir} has no name on {sickgear.TVInfoAPI(self.tvid).name},' + f' probably the wrong language used to search with.') ui.notifications.error('Unable to add show', 'Show in %s has no name on %s, probably the wrong language.' ' Delete .nfo and add manually in the correct language.' % @@ -999,8 +999,7 @@ class QueueItemAdd(ShowQueueItem): self._finish_early() return except (BaseException, Exception): - logger.log('Unable to find show ID:%s on TV info: %s' % (self.prodid, sickgear.TVInfoAPI(self.tvid).name), - logger.ERROR) + logger.error('Unable to find show ID:%s on TV info: %s' % (self.prodid, sickgear.TVInfoAPI(self.tvid).name)) ui.notifications.error('Unable to add show', 'Unable to look up the show in %s on %s using ID %s, not using the NFO.' ' Delete .nfo and try adding manually again.' % @@ -1046,9 +1045,7 @@ class QueueItemAdd(ShowQueueItem): self.show_obj.sports = 1 except BaseTVinfoException as e: - logger.log( - 'Unable to add show due to an error with %s: %s' % (sickgear.TVInfoAPI(self.tvid).name, ex(e)), - logger.ERROR) + logger.error(f'Unable to add show due to an error with {sickgear.TVInfoAPI(self.tvid).name}: {ex(e)}') if self.show_obj: ui.notifications.error('Unable to add %s due to an error with %s' % (self.show_obj.unique_name, sickgear.TVInfoAPI(self.tvid).name)) @@ -1059,14 +1056,14 @@ class QueueItemAdd(ShowQueueItem): return except exceptions_helper.MultipleShowObjectsException: - logger.log('The show in %s is already in your show list, skipping' % self.showDir, logger.ERROR) + logger.error('The show in %s is already in your show list, skipping' % self.showDir) ui.notifications.error('Show skipped', 'The show in %s is already in your show list' % self.showDir) self._finish_early() return except (BaseException, Exception) as e: - logger.log('Error trying to add show: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error trying to add show: %s' % ex(e)) + logger.error(traceback.format_exc()) self._finish_early() raise @@ -1075,8 +1072,8 @@ class QueueItemAdd(ShowQueueItem): try: self.show_obj.save_to_db() except (BaseException, Exception) as e: - logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error saving the show to the database: %s' % ex(e)) + logger.error(traceback.format_exc()) self._finish_early() raise @@ -1092,16 +1089,15 @@ class QueueItemAdd(ShowQueueItem): self.show_obj.load_episodes_from_tvinfo(tvinfo_data=(None, result)[ self.show_obj.prodid == getattr(result, 'id', None)]) except (BaseException, Exception) as e: - logger.log( - 'Error with %s, not creating episode list: %s' % (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e)), - logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error(f'Error with {sickgear.TVInfoAPI(self.show_obj.tvid).name},' + f' not creating episode list: {ex(e)}') + logger.error(traceback.format_exc()) try: 
self.show_obj.load_episodes_from_dir() except (BaseException, Exception) as e: - logger.log('Error searching directory for episodes: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error searching directory for episodes: %s' % ex(e)) + logger.error(traceback.format_exc()) # if they gave a custom status then change all the eps to it my_db = db.DBConnection() @@ -1149,8 +1145,8 @@ class QueueItemAdd(ShowQueueItem): try: self.show_obj.save_to_db() except (BaseException, Exception) as e: - logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error saving the show to the database: %s' % ex(e)) + logger.error(traceback.format_exc()) self._finish_early() raise @@ -1288,8 +1284,7 @@ class QueueItemRename(ShowQueueItem): try: _ = self.show_obj.location except exceptions_helper.ShowDirNotFoundException: - logger.log('Can\'t perform rename on %s when the show directory is missing.' - % self.show_obj.unique_name, logger.WARNING) + logger.warning(f'Can\'t perform rename on {self.show_obj.unique_name} when the show directory is missing.') return ep_obj_rename_list = [] @@ -1386,7 +1381,7 @@ class QueueItemUpdate(ShowQueueItem): logger.log('Beginning update of %s' % self.show_obj.unique_name) - logger.log('Retrieving show info from %s' % sickgear.TVInfoAPI(self.show_obj.tvid).name, logger.DEBUG) + logger.debug('Retrieving show info from %s' % sickgear.TVInfoAPI(self.show_obj.tvid).name) try: result = self.show_obj.load_from_tvinfo(cache=not self.force, tvinfo_data=self.tvinfo_data, scheduled_update=self.scheduled_update, switch=self.switch) @@ -1395,12 +1390,11 @@ class QueueItemUpdate(ShowQueueItem): elif not self.show_obj.prodid == getattr(self.tvinfo_data, 'id', None): self.tvinfo_data = result except BaseTVinfoAttributenotfound as e: - logger.log('Data retrieved from %s was incomplete, aborting: %s' % - (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Data retrieved from {sickgear.TVInfoAPI(self.show_obj.tvid).name} was incomplete,' + f' aborting: {ex(e)}') return except BaseTVinfoError as e: - logger.log('Unable to contact %s, aborting: %s' % (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e)), - logger.WARNING) + logger.warning('Unable to contact %s, aborting: %s' % (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e))) return if self.force_web: @@ -1409,22 +1403,22 @@ class QueueItemUpdate(ShowQueueItem): try: self.show_obj.save_to_db() except (BaseException, Exception) as e: - logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('Error saving the show to the database: %s' % ex(e)) + logger.error(traceback.format_exc()) # get episode list from DB - logger.log('Loading all episodes from the database', logger.DEBUG) + logger.debug('Loading all episodes from the database') db_ep_obj_list = self.show_obj.load_episodes_from_db(update=True) # get episode list from TVDB - logger.log('Loading all episodes from %s' % sickgear.TVInfoAPI(self.show_obj.tvid).name, logger.DEBUG) + logger.debug('Loading all episodes from %s' % sickgear.TVInfoAPI(self.show_obj.tvid).name) try: tvinfo_ep_list = self.show_obj.load_episodes_from_tvinfo(cache=not self.force, update=True, tvinfo_data=self.tvinfo_data, switch=self.switch, old_tvid=self.old_tvid, old_prodid=self.old_prodid) except BaseTVinfoException as e: - logger.log('Unable to get info from %s, the show 
info will not be refreshed: %s' % - (sickgear.TVInfoAPI(self.show_obj.tvid).name, ex(e)), logger.ERROR) + logger.error(f'Unable to get info from {sickgear.TVInfoAPI(self.show_obj.tvid).name},' + f' the show info will not be refreshed: {ex(e)}') tvinfo_ep_list = None if None is tvinfo_ep_list: @@ -1437,7 +1431,7 @@ class QueueItemUpdate(ShowQueueItem): # for each ep we found on TVDB delete it from the DB list for cur_season in tvinfo_ep_list: for cur_episode in tvinfo_ep_list[cur_season]: - logger.log('Removing %sx%s from the DB list' % (cur_season, cur_episode), logger.DEBUG) + logger.debug('Removing %sx%s from the DB list' % (cur_season, cur_episode)) if cur_season in db_ep_obj_list and cur_episode in db_ep_obj_list[cur_season]: del db_ep_obj_list[cur_season][cur_episode] @@ -1451,15 +1445,14 @@ class QueueItemUpdate(ShowQueueItem): if self.switch: cl.append(self.show_obj.switch_ep_change_sql( self.old_tvid, self.old_prodid, cur_season, cur_episode, TVSWITCH_EP_DELETED)) - logger.log('Permanently deleting episode %sx%s from the database' % - (cur_season, cur_episode), logger.MESSAGE) + logger.log(f'Permanently deleting episode {cur_season}x{cur_episode} from the database') try: cl.extend(ep_obj.delete_episode(return_sql=True)) except exceptions_helper.EpisodeDeletedException: pass else: - logger.log('Not deleting episode %sx%s from the database because status is: %s' % - (cur_season, cur_episode, statusStrings[status]), logger.MESSAGE) + logger.log(f'Not deleting episode {cur_season}x{cur_episode} from the database' + f' because status is: {statusStrings[status]}') if cl: my_db = db.DBConnection() @@ -1606,7 +1599,7 @@ class QueueItemSwitchSource(ShowQueueItem): else: which_show = '%s:%s' % (self.old_tvid, self.old_prodid) self._set_switch_tbl_status(TVSWITCH_SAME_ID) - logger.log('Unchanged ids given, nothing to do for %s' % which_show, logger.ERROR) + logger.error('Unchanged ids given, nothing to do for %s' % which_show) return True return False @@ -1647,7 +1640,7 @@ class QueueItemSwitchSource(ShowQueueItem): which_show = '%s:%s' % (self.old_tvid, self.old_prodid) ui.notifications.message('TV info source switch: %s' % which_show, 'Error: could not find a id for show on new tv info source') - logger.log('Error: could not find a id for show on new tv info source: %s' % which_show, logger.WARNING) + logger.warning('Error: could not find an id for show on new tv info source: %s' % which_show) self._set_switch_tbl_status(TVSWITCH_NO_NEW_ID) return @@ -1662,7 +1655,7 @@ class QueueItemSwitchSource(ShowQueueItem): which_show = self.show_obj.unique_name else: which_show = '%s:%s' % (self.old_tvid, self.old_prodid) - logger.log('Duplicate shows in DB for show: %s' % which_show, logger.WARNING) + logger.warning('Duplicate shows in DB for show: %s' % which_show) ui.notifications.message('TV info source switch: %s' % which_show, 'Error: %s' % msg) self._set_switch_tbl_status(TVSWITCH_DUPLICATE_SHOW) @@ -1676,7 +1669,7 @@ class QueueItemSwitchSource(ShowQueueItem): ui.notifications.message('TV info source switch: %s' % which_show, 'Error: %s' % msg) self._set_switch_tbl_status(TVSWITCH_SOURCE_NOT_FOUND_ERROR) - logger.log('Unable to find the specified show: %s' % which_show, logger.WARNING) + logger.warning('Unable to find the specified show: %s' % which_show) return tvinfo_config = sickgear.TVInfoAPI(self.new_tvid).api_params.copy() @@ -1696,8 +1689,8 @@ class QueueItemSwitchSource(ShowQueueItem): td = t.get_show(show_id=new_prodid, actors=True, language=self.show_obj.lang) except (BaseException,
Exception): td = None - logger.log('Failed to get new tv show id (%s) from source %s' % - (new_prodid, sickgear.TVInfoAPI(self.new_tvid).name), logger.WARNING) + logger.warning(f'Failed to get new tv show id ({new_prodid})' + f' from source {sickgear.TVInfoAPI(self.new_tvid).name}') if None is td: self._set_switch_tbl_status(TVSWITCH_NOT_FOUND_ERROR) msg = 'Show not found on new tv source' @@ -1706,7 +1699,7 @@ class QueueItemSwitchSource(ShowQueueItem): else: which_show = '%s:%s' % (self.old_tvid, self.old_prodid) ui.notifications.message('TV info source switch: %s' % which_show, 'Error: %s' % msg) - logger.log('show: %s not found on new tv source' % self.show_obj.tvid_prodid, logger.WARNING) + logger.warning('show: %s not found on new tv source' % self.show_obj.tvid_prodid) return try: @@ -1756,7 +1749,7 @@ class QueueItemSwitchSource(ShowQueueItem): msg = 'Show %s new id conflicts with existing show: %s' % \ ('[%s (%s)]' % (self.show_obj.unique_name, self.show_obj.tvid_prodid), '[%s (%s)]' % (new_show_obj.unique_name, new_show_obj.tvid_prodid)) - logger.log(msg, logger.WARNING) + logger.warning(msg) return self.progress = 'Switching to new source' self._set_switch_id(new_prodid) diff --git a/sickgear/show_updater.py b/sickgear/show_updater.py index 9d6970be..144398a7 100644 --- a/sickgear/show_updater.py +++ b/sickgear/show_updater.py @@ -72,95 +72,95 @@ class ShowUpdater(object): try: sickgear.db.backup_all_dbs(sickgear.BACKUP_DB_PATH or os.path.join(sickgear.DATA_DIR, 'backup')) except (BaseException, Exception): - logger.log('backup db error', logger.ERROR) + logger.error('backup db error') # refresh network timezones try: network_timezones.update_network_dict() except (BaseException, Exception): - logger.log('network timezone update error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('network timezone update error') + logger.error(traceback.format_exc()) # refresh webdl types try: properFinder.load_webdl_types() except (BaseException, Exception): - logger.log('error loading webdl_types', logger.DEBUG) + logger.debug('error loading webdl_types') # update xem id lists try: sickgear.scene_exceptions.get_xem_ids() except (BaseException, Exception): - logger.log('xem id list update error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('xem id list update error') + logger.error(traceback.format_exc()) # update scene exceptions try: sickgear.scene_exceptions.retrieve_exceptions() except (BaseException, Exception): - logger.log('scene exceptions update error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('scene exceptions update error') + logger.error(traceback.format_exc()) # clear the data of unused providers try: sickgear.helpers.clear_unused_providers() except (BaseException, Exception): - logger.log('unused provider cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('unused provider cleanup error') + logger.error(traceback.format_exc()) # cleanup image cache try: sickgear.helpers.cleanup_cache() except (BaseException, Exception): - logger.log('image cache cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('image cache cleanup error') + logger.error(traceback.format_exc()) # check tvinfo cache try: for i in sickgear.TVInfoAPI().all_sources: sickgear.TVInfoAPI(i).setup().check_cache() except (BaseException, Exception): - logger.log('tvinfo cache check error', logger.ERROR) - 
logger.log(traceback.format_exc(), logger.ERROR) + logger.error('tvinfo cache check error') + logger.error(traceback.format_exc()) # cleanup tvinfo cache try: for i in sickgear.TVInfoAPI().all_sources: sickgear.TVInfoAPI(i).setup().clean_cache() except (BaseException, Exception): - logger.log('tvinfo cache cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('tvinfo cache cleanup error') + logger.error(traceback.format_exc()) # cleanup ignore and require lists try: clean_ignore_require_words() except (BaseException, Exception): - logger.log('ignore, require words cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('ignore, require words cleanup error') + logger.error(traceback.format_exc()) # cleanup manual search history sickgear.search_queue.remove_old_fifo(sickgear.search_queue.MANUAL_SEARCH_HISTORY) # add missing mapped ids if not sickgear.background_mapping_task.is_alive(): - logger.log(u'Updating the TV info mappings') + logger.log('Updating the TV info mappings') import threading try: sickgear.background_mapping_task = threading.Thread( name='MAPPINGSUPDATER', target=sickgear.indexermapper.load_mapped_ids, kwargs={'update': True}) sickgear.background_mapping_task.start() except (BaseException, Exception): - logger.log('missing mapped ids update error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('missing mapped ids update error') + logger.error(traceback.format_exc()) - logger.log(u'Doing full update on all shows') + logger.log('Doing full update on all shows') # clean out cache directory, remove everything > 12 hours old try: sickgear.helpers.clear_cache() except (BaseException, Exception): - logger.log('cache dir cleanup error', logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('cache dir cleanup error') + logger.error(traceback.format_exc()) # select 10 'Ended' tv_shows updated more than 90 days ago # and all shows not updated more than 180 days ago to include in this update @@ -208,21 +208,21 @@ class ShowUpdater(object): cur_queue_item = sickgear.show_queue_scheduler.action.update_show( cur_show_obj, scheduled_update=True) else: - logger.debug(u'Not updating episodes for show %s because it\'s marked as ended and last/next' - u' episode is not within the grace period.' 
% cur_show_obj.unique_name) + logger.debug(f'Not updating episodes for show {cur_show_obj.unique_name} because it\'s' + f' marked as ended and last/next episode is not within the grace period.') cur_queue_item = sickgear.show_queue_scheduler.action.refresh_show(cur_show_obj, True, True) pi_list.append(cur_queue_item) except (exceptions_helper.CantUpdateException, exceptions_helper.CantRefreshException) as e: - logger.log(u'Automatic update failed: ' + ex(e), logger.ERROR) + logger.error(f'Automatic update failed: {ex(e)}') if len(pi_list): sickgear.show_queue_scheduler.action.daily_update_running = True ui.ProgressIndicators.set_indicator('dailyUpdate', ui.QueueProgressIndicator('Daily Update', pi_list)) - logger.log(u'Added all shows to show queue for full update') + logger.log('Added all shows to show queue for full update') finally: self.amActive = False diff --git a/sickgear/subtitles.py b/sickgear/subtitles.py index 2cffd798..c8cda3a0 100644 --- a/sickgear/subtitles.py +++ b/sickgear/subtitles.py @@ -124,11 +124,11 @@ class SubtitlesFinder(object): def _main(self): if 1 > len(sickgear.subtitles.get_enabled_service_list()): - logger.log(u'Not enough services selected. At least 1 service is required to' - u' search subtitles in the background', logger.ERROR) + logger.error('Not enough services selected. At least 1 service is required to' + ' search subtitles in the background') return - logger.log(u'Checking for subtitles', logger.MESSAGE) + logger.log('Checking for subtitles') # get episodes on which we want subtitles # criteria is: @@ -164,8 +164,8 @@ class SubtitlesFinder(object): for cur_result in sql_result: if not os.path.isfile(cur_result['location']): - logger.log('Episode file does not exist, cannot download subtitles for episode %dx%d of show %s' - % (cur_result['season'], cur_result['episode'], cur_result['show_name']), logger.DEBUG) + logger.debug(f'Episode file does not exist, cannot download subtitles for episode' + f' {cur_result["season"]:d}x{cur_result["episode"]:d} of show {cur_result["show_name"]}') continue # Old shows rule @@ -177,17 +177,17 @@ class SubtitlesFinder(object): (cur_result['airdate_daydiff'] <= 7 and cur_result['searchcount'] < 7 and now - datetime.datetime.strptime(cur_result['lastsearch'], '%Y-%m-%d %H:%M:%S') > datetime.timedelta(hours=rules['new'][cur_result['searchcount']]))): - logger.log('Downloading subtitles for episode %dx%d of show %s' - % (cur_result['season'], cur_result['episode'], cur_result['show_name']), logger.DEBUG) + logger.debug(f'Downloading subtitles for episode {cur_result["season"]:d}x{cur_result["episode"]:d}' + f' of show {cur_result["show_name"]}') show_obj = helpers.find_show_by_id({int(cur_result['tv_id']): int(cur_result['prod_id'])}) if not show_obj: - logger.log(u'Show not found', logger.DEBUG) + logger.debug('Show not found') return ep_obj = show_obj.get_episode(int(cur_result['season']), int(cur_result['episode'])) if isinstance(ep_obj, str): - logger.log(u'Episode not found', logger.DEBUG) + logger.debug('Episode not found') return # noinspection PyUnusedLocal @@ -197,7 +197,7 @@ class SubtitlesFinder(object): # noinspection PyUnusedLocal subtitles = ep_obj.download_subtitles() except (BaseException, Exception): - logger.log(u'Unable to find subtitles', logger.DEBUG) + logger.debug('Unable to find subtitles') return @staticmethod diff --git a/sickgear/tv.py b/sickgear/tv.py index 23641792..b9e6d5db 100644 --- a/sickgear/tv.py +++ b/sickgear/tv.py @@ -1589,8 +1589,7 @@ class TVShow(TVShowBase): self._paused = 
int(value) self.dirty = True else: - logger.log('tried to set paused property to invalid value: %s of type: %s' % (value, type(value)), - logger.ERROR) + logger.error('tried to set paused property to invalid value: %s of type: %s' % (value, type(value))) @property def ids(self): @@ -1644,7 +1643,7 @@ class TVShow(TVShowBase): def _set_location(self, new_location): # type: (AnyStr) -> None - logger.log('Setter sets location to %s' % new_location, logger.DEBUG) + logger.debug('Setter sets location to %s' % new_location) # Don't validate dir if user wants to add shows without creating a dir if sickgear.ADD_SHOWS_WO_DIR or os.path.isdir(new_location): self.dirty_setter('_location')(self, new_location) @@ -1781,8 +1780,8 @@ class TVShow(TVShowBase): if no_create: return - # logger.log('%s: An object for episode %sx%s did not exist in the cache, trying to create it' % - # (self.tvid_prodid, season, episode), logger.DEBUG) + # logger.debug('%s: An object for episode %sx%s did not exist in the cache, trying to create it' % + # (self.tvid_prodid, season, episode)) if path and not existing_only: ep_obj = TVEpisode(self, season, episode, path, show_result=ep_result) @@ -1993,8 +1992,7 @@ class TVShow(TVShowBase): # In some situations self.status = None, need to figure out where that is! if not self._status: self.status = '' - logger.log('Status missing for show: [%s] with status: [%s]' % - (self.tvid_prodid, self._status), logger.DEBUG) + logger.debug(f'Status missing for show: [{self.tvid_prodid}] with status: [{self._status}]') last_update_indexer = datetime.date.fromordinal(self._last_update_indexer) @@ -2105,16 +2103,13 @@ class TVShow(TVShowBase): for cur_row in sql_result: if (cur_row['season'], cur_row['episode']) in processed: continue - logger.log('%s: Retrieving/creating episode %sx%s' - % (self.tvid_prodid, cur_row['season'], cur_row['episode']), logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: Retrieving/creating episode {cur_row["season"]}x{cur_row["episode"]}') ep_obj = self.get_episode(cur_row['season'], cur_row['episode'], ep_result=[cur_row]) if not ep_obj.related_ep_obj: processed += [(cur_row['season'], cur_row['episode'])] else: - logger.log('%s: Found related to %sx%s episode(s)... %s' - % (self.tvid_prodid, cur_row['season'], cur_row['episode'], - ', '.join(['%sx%s' % (x.season, x.episode) for x in ep_obj.related_ep_obj])), - logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: Found related to {cur_row["season"]}x{cur_row["episode"]} episode(s)' + f'... 
{", ".join(["%sx%s" % (x.season, x.episode) for x in ep_obj.related_ep_obj])}') processed += list(set([(cur_row['season'], cur_row['episode'])] + [(x.season, x.episode) for x in ep_obj.related_ep_obj])) ep_obj.create_meta_files(force) @@ -2159,14 +2154,14 @@ class TVShow(TVShowBase): parse_result = None ep_obj = None - logger.log('%s: Creating episode from %s' % (self.tvid_prodid, cur_media_file), logger.DEBUG) + logger.debug('%s: Creating episode from %s' % (self.tvid_prodid, cur_media_file)) try: ep_obj = self.ep_obj_from_file(os.path.join(self._location, cur_media_file)) except (exceptions_helper.ShowNotFoundException, exceptions_helper.EpisodeNotFoundException) as e: - logger.log('Episode %s returned an exception: %s' % (cur_media_file, ex(e)), logger.ERROR) + logger.error('Episode %s returned an exception: %s' % (cur_media_file, ex(e))) continue except exceptions_helper.EpisodeDeletedException: - logger.log('The episode deleted itself when I tried making an object for it', logger.DEBUG) + logger.debug('The episode deleted itself when I tried making an object for it') if None is ep_obj: continue @@ -2183,9 +2178,7 @@ class TVShow(TVShowBase): pass if ep_file_name and parse_result and None is not parse_result.release_group and not ep_obj.release_name: - logger.log( - 'Name %s gave release group of %s, seems valid' % (ep_file_name, parse_result.release_group), - logger.DEBUG) + logger.debug(f'Name {ep_file_name} gave release group of {parse_result.release_group}, seems valid') ep_obj.release_name = ep_file_name # store the reference in the show @@ -2194,8 +2187,8 @@ class TVShow(TVShowBase): try: ep_obj.refresh_subtitles() except (BaseException, Exception): - logger.log('%s: Could not refresh subtitles' % self.tvid_prodid, logger.ERROR) - logger.log(traceback.format_exc(), logger.ERROR) + logger.error('%s: Could not refresh subtitles' % self.tvid_prodid) + logger.error(traceback.format_exc()) result = ep_obj.get_sql() if None is not result: @@ -2238,8 +2231,7 @@ class TVShow(TVShowBase): try: cached_show = t.get_show(self.prodid, language=self._lang) except BaseTVinfoError as e: - logger.log('Unable to find cached seasons from %s: %s' % ( - sickgear.TVInfoAPI(self.tvid).name, ex(e)), logger.WARNING) + logger.warning(f'Unable to find cached seasons from {sickgear.TVInfoAPI(self.tvid).name}: {ex(e)}') if None is cached_show: return scanned_eps @@ -2264,14 +2256,14 @@ class TVShow(TVShowBase): try: cached_seasons[season] = cached_show[season] except BaseTVinfoSeasonnotfound as e: - logger.log('Error when trying to load the episode for [%s] from %s: %s' % - (self._name, sickgear.TVInfoAPI(self.tvid).name, ex(e)), logger.WARNING) + logger.warning(f'Error when trying to load the episode for [{self._name}]' + f' from {sickgear.TVInfoAPI(self.tvid).name}: {ex(e)}') delete_ep = True if season not in scanned_eps: scanned_eps[season] = {} - logger.log('Loading episode %sx%s for [%s] from the DB' % (season, episode, self.name), logger.DEBUG) + logger.debug('Loading episode %sx%s for [%s] from the DB' % (season, episode, self.name)) try: ep_obj = self.get_episode(season, episode, ep_result=[cur_row]) # type: TVEpisode @@ -2285,8 +2277,8 @@ class TVShow(TVShowBase): ep_obj.load_from_tvinfo(tvapi=t, update=update, cached_show=cached_show) scanned_eps[season][episode] = True except exceptions_helper.EpisodeDeletedException: - logger.log('Tried loading an episode that should have been deleted from the DB [%s], skipping it' - % self._name, logger.DEBUG) + logger.debug(f'Tried loading an episode that 
should have been deleted from the DB [{self._name}],' + f' skipping it') continue if cl: @@ -2338,9 +2330,8 @@ class TVShow(TVShowBase): t = sickgear.TVInfoAPI(self.tvid).setup(**tvinfo_config) show_obj = t.get_show(self.prodid, language=self._lang) except BaseTVinfoError: - logger.log('%s timed out, unable to update episodes for [%s] from %s' % - (sickgear.TVInfoAPI(self.tvid).name, self._name, sickgear.TVInfoAPI(self.tvid).name), - logger.ERROR) + logger.error(f'{sickgear.TVInfoAPI(self.tvid).name} timed out,' + f' unable to update episodes for [{self._name}] from {sickgear.TVInfoAPI(self.tvid).name}') return None scanned_eps = {} @@ -2374,9 +2365,8 @@ class TVShow(TVShowBase): continue with ep_obj.lock: - logger.log('%s: Loading info from %s for episode %sx%s from [%s]' % - (self.tvid_prodid, sickgear.TVInfoAPI(self.tvid).name, season, episode, self._name), - logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: Loading info from {sickgear.TVInfoAPI(self.tvid).name}' + f' for episode {season}x{episode} from [{self._name}]') ep_obj.load_from_tvinfo(season, episode, tvapi=t, update=update, cached_show=show_obj, switch=switch, old_tvid=old_tvid, old_prodid=old_prodid, switch_list=sql_l) @@ -2403,7 +2393,7 @@ class TVShow(TVShowBase): for cur_provider in itervalues(sickgear.metadata_provider_dict): # FIXME: Needs to not show this message if the option is not enabled? - logger.log('Running metadata routines for %s' % cur_provider.name, logger.DEBUG) + logger.debug('Running metadata routines for %s' % cur_provider.name) fanart_result = cur_provider.create_fanart(self) or fanart_result poster_result = cur_provider.create_poster(self) or poster_result @@ -2429,21 +2419,21 @@ class TVShow(TVShowBase): logger.log('%s: Not a real file... %s' % (self.tvid_prodid, path)) return None - logger.log('%s: Creating episode object from %s' % (self.tvid_prodid, path), logger.DEBUG) + logger.debug('%s: Creating episode object from %s' % (self.tvid_prodid, path)) try: my_parser = NameParser(show_obj=self) parse_result = my_parser.parse(path) except InvalidNameException: - logger.log('Unable to parse the filename %s into a valid episode' % path, logger.DEBUG) + logger.debug('Unable to parse the filename %s into a valid episode' % path) return None except InvalidShowException: - logger.log('Unable to parse the filename %s into a valid show' % path, logger.DEBUG) + logger.debug('Unable to parse the filename %s into a valid show' % path) return None if not len(parse_result.episode_numbers): logger.log('parse_result: %s' % parse_result) - logger.log('No episode number found in %s, ignoring it' % path, logger.ERROR) + logger.error('No episode number found in %s, ignoring it' % path) return None # for now let's assume that any episode in the show dir belongs to that show @@ -2455,8 +2445,7 @@ class TVShow(TVShowBase): for cur_ep_num in episode_numbers: cur_ep_num = int(cur_ep_num) - logger.log('%s: %s parsed to %s %sx%s' % (self.tvid_prodid, path, self._name, season_number, cur_ep_num), - logger.DEBUG) + logger.debug('%s: %s parsed to %s %sx%s' % (self.tvid_prodid, path, self._name, season_number, cur_ep_num)) check_quality_again = False same_file = False @@ -2466,7 +2455,7 @@ class TVShow(TVShowBase): try: ep_obj = self.get_episode(season_number, cur_ep_num, path) except exceptions_helper.EpisodeNotFoundException: - logger.log('%s: Unable to figure out what this file is, skipping' % self.tvid_prodid, logger.ERROR) + logger.error('%s: Unable to figure out what this file is, skipping' % self.tvid_prodid) continue else: 
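For reference, the bulk of this patch applies two mechanical transforms to every call site: a `logger.log(msg, logger.LEVEL)` call becomes the matching level-specific helper (`debug`/`warning`/`error`), and `u''` / `%`-formatted strings become f-strings or plain strings. Below is a minimal runnable sketch of the helper shape these call sites assume; it is illustrative only, built on stdlib logging, and is not SickGear's actual logger module:

import logging

logging.basicConfig(level=logging.DEBUG)  # make the demo call below visible
_log = logging.getLogger('sickgear')

def debug(msg):
    # the level is fixed by the helper, so call sites drop the logger.DEBUG argument
    _log.debug(msg)

def warning(msg):
    _log.warning(msg)

def error(msg):
    _log.error(msg)

season, episode, name = 1, 2, 'Example Show'
# old py2-era style, as seen in the removed (-) lines:
#   logger.log(u'Loading episode %sx%s for [%s]' % (season, episode, name), logger.DEBUG)
# new style, as applied in the added (+) lines:
debug(f'Loading episode {season}x{episode} for [{name}]')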
@@ -2479,8 +2468,8 @@ class TVShow(TVShowBase): if (ep_obj.location and os.path.normpath(ep_obj.location) != os.path.normpath(path)) or \ (not ep_obj.location and path) or \ (SKIPPED == status): - logger.log('The old episode had a different file associated with it, re-checking the quality ' + - 'based on the new filename %s' % path, logger.DEBUG) + logger.debug('The old episode had a different file associated with it, re-checking the quality ' + 'based on the new filename %s' % path) check_quality_again = True with ep_obj.lock: @@ -2509,8 +2498,8 @@ class TVShow(TVShowBase): new_quality = Quality.name_quality(path, self.is_anime) if Quality.UNKNOWN == new_quality: new_quality = Quality.file_quality(path) - logger.log('Since this file was renamed, file %s was checked and quality "%s" found' - % (path, Quality.qualityStrings[new_quality]), logger.DEBUG) + logger.debug(f'Since this file was renamed, file {path}' + f' was checked and quality "{Quality.qualityStrings[new_quality]}" found') status, quality = sickgear.common.Quality.split_composite_status(ep_obj.status) if Quality.UNKNOWN != new_quality or status in (SKIPPED, UNAIRED): ep_obj.status = Quality.composite_status(DOWNLOADED, new_quality) @@ -2530,18 +2519,16 @@ class TVShow(TVShowBase): # if it was snatched and now exists then set the status correctly if SNATCHED == old_status and old_quality <= new_quality: - logger.log('STATUS: this episode used to be snatched with quality %s but' - ' a file exists with quality %s so setting the status to DOWNLOADED' - % (Quality.qualityStrings[old_quality], Quality.qualityStrings[new_quality]), - logger.DEBUG) + logger.debug(f'STATUS: this episode used to be snatched with quality' + f' {Quality.qualityStrings[old_quality]} but a file exists with quality' + f' {Quality.qualityStrings[new_quality]} so setting the status to DOWNLOADED') new_status = DOWNLOADED # if it was snatched proper, and we found a higher quality one then allow the status change elif SNATCHED_PROPER == old_status and old_quality < new_quality: - logger.log('STATUS: this episode used to be snatched proper with quality %s but' - ' a file exists with quality %s so setting the status to DOWNLOADED' - % (Quality.qualityStrings[old_quality], Quality.qualityStrings[new_quality]), - logger.DEBUG) + logger.debug(f'STATUS: this episode used to be snatched proper with quality' + f' {Quality.qualityStrings[old_quality]} but a file exists with quality' + f' {Quality.qualityStrings[new_quality]} so setting the status to DOWNLOADED') new_status = DOWNLOADED elif old_status not in SNATCHED_ANY: @@ -2549,8 +2536,8 @@ class TVShow(TVShowBase): if None is not new_status: with ep_obj.lock: - logger.log('STATUS: we have an associated file, so setting the status from %s to DOWNLOADED/%s' - % (ep_obj.status, Quality.composite_status(new_status, new_quality)), logger.DEBUG) + logger.debug(f'STATUS: we have an associated file, so setting the status from {ep_obj.status}' + f' to DOWNLOADED/{Quality.composite_status(new_status, new_quality)}') ep_obj.status = Quality.composite_status(new_status, new_quality) elif same_file: @@ -2559,8 +2546,8 @@ class TVShow(TVShowBase): new_quality = Quality.name_quality(path, self.is_anime) if Quality.UNKNOWN == new_quality: new_quality = Quality.file_quality(path) - logger.log('Since this file has status: "%s", file %s was checked and quality "%s" found' - % (statusStrings[status], path, Quality.qualityStrings[new_quality]), logger.DEBUG) + logger.debug(f'Since this file has status: "{statusStrings[status]}", file 
{path}' + f' was checked and quality "{Quality.qualityStrings[new_quality]}" found') ep_obj.status = Quality.composite_status(DOWNLOADED, new_quality) with ep_obj.lock: @@ -2672,7 +2659,7 @@ class TVShow(TVShowBase): self.release_groups = self._anime and AniGroupList(self.tvid, self.prodid, self.tvid_prodid) or None - logger.log(u'Loaded.. {: <9} {: <8} {}'.format( + logger.log('Loaded.. {: <9} {: <8} {}'.format( sickgear.TVInfoAPI(self.tvid).config.get('name') + ',', '%s,' % self.prodid, self.name)) # Get IMDb_info from database @@ -2697,8 +2684,7 @@ class TVShow(TVShowBase): if 'is_mini_series' in self._imdb_info: self._imdb_info['is_mini_series'] = bool(self._imdb_info['is_mini_series']) elif sickgear.USE_IMDB_INFO: - logger.log('%s: The next show update will attempt to find IMDb info for [%s]' % - (self.tvid_prodid, self.name), logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: The next show update will attempt to find IMDb info for [{self.name}]') return self.dirty = False @@ -2799,9 +2785,9 @@ class TVShow(TVShowBase): if None is show_info or getattr(t, 'show_not_found', False): if getattr(t, 'show_not_found', False): self.inc_not_found_count() - logger.log('Show [%s] not found (maybe even removed?)' % self._name, logger.WARNING) + logger.warning('Show [%s] not found (maybe even removed?)' % self._name) else: - logger.log('Show data [%s] not found' % self._name, logger.WARNING) + logger.warning('Show data [%s] not found' % self._name) return False self.reset_not_found_count() @@ -2961,8 +2947,8 @@ class TVShow(TVShowBase): try: old_person_ids.remove(existing_person.id) except KeyError: - logger.log('%s - Person error: %s (%s)' % - (self.name, existing_person.name, existing_person.id), logger.ERROR) + logger.error(f'{self.name} -' + f' Person error: {existing_person.name} ({existing_person.id})') pass if force: existing_person.reset(src_person) @@ -3025,12 +3011,12 @@ class TVShow(TVShowBase): if not sickgear.USE_IMDB_INFO: return - logger.log('Retrieving show info [%s] from IMDb' % self._name, logger.DEBUG) + logger.debug('Retrieving show info [%s] from IMDb' % self._name) try: self._get_imdb_info() except (BaseException, Exception) as e: - logger.log('Error loading IMDb info: %s' % ex(e), logger.ERROR) - logger.log('%s' % traceback.format_exc(), logger.ERROR) + logger.error('Error loading IMDb info: %s' % ex(e)) + logger.error('%s' % traceback.format_exc()) @staticmethod def check_imdb_redirect(imdb_id): @@ -3079,7 +3065,7 @@ class TVShow(TVShowBase): imdb_info['imdb_id'] = self.imdbid i = imdbpie.Imdb(exclude_episodes=True, cachedir=os.path.join(sickgear.CACHE_DIR, 'imdb-pie')) if not helpers.parse_imdb_id(imdb_id): - logger.log('Not a valid imdbid: %s for show: %s' % (imdb_id, self._name), logger.WARNING) + logger.warning('Not a valid imdbid: %s for show: %s' % (imdb_id, self._name)) return imdb_ratings = i.get_title_ratings(imdb_id=imdb_id) imdb_akas = i.get_title_versions(imdb_id=imdb_id) @@ -3087,8 +3073,8 @@ class TVShow(TVShowBase): ipie = getattr(imdbpie.__dict__.get('imdbpie'), '_SIMPLE_GET_ENDPOINTS', None) if ipie: ipie.update({ - u'get_title_certificates': u'/title/{imdb_id}/certificates', - u'get_title_parentalguide': u'/title/{imdb_id}/parentalguide', + 'get_title_certificates': '/title/{imdb_id}/certificates', + 'get_title_parentalguide': '/title/{imdb_id}/parentalguide', }) imdb_certificates = i.get_title_certificates(imdb_id=imdb_id) except LookupError as e: @@ -3099,17 +3085,17 @@ class TVShow(TVShowBase): indexermapper.map_indexers_to_show(self, force=True) if not 
retry and imdb_id != 'tt%07d' % self.ids[indexermapper.TVINFO_IMDB]['id']: # add retry arg to prevent endless loops - logger.log('imdbid: %s not found. retrying with newly found id: %s' % - (imdb_id, 'tt%07d' % self.ids[indexermapper.TVINFO_IMDB]['id']), logger.DEBUG) + logger.debug(f'imdbid: {imdb_id} not found. retrying with newly found id:' + f' {"tt%07d" % self.ids[indexermapper.TVINFO_IMDB]["id"]}') self._get_imdb_info(retry=True) return - logger.log('imdbid: %s not found. Error: %s' % (imdb_id, ex(e)), logger.WARNING) + logger.warning('imdbid: %s not found. Error: %s' % (imdb_id, ex(e))) return except ImdbAPIError as e: - logger.log('Imdb API Error: %s' % ex(e), logger.WARNING) + logger.warning('Imdb API Error: %s' % ex(e)) return except (BaseException, Exception) as e: - logger.log('Error: %s retrieving imdb id: %s' % (ex(e), imdb_id), logger.WARNING) + logger.warning('Error: %s retrieving imdb id: %s' % (ex(e), imdb_id)) return # ratings @@ -3180,19 +3166,19 @@ class TVShow(TVShowBase): imdb_info['certificates'] = '|'.join([cert for cert in itervalues(certs_head) if cert] + sorted(certs_tail)) if (not imdb_info['certificates'] and isinstance(imdb_tv.get('certificate'), dict) and isinstance(imdb_tv.get('certificate').get('certificate'), string_types)): - imdb_info['certificates'] = '%s:%s' % (u'US', imdb_tv.get('certificate').get('certificate')) + imdb_info['certificates'] = f'US:{imdb_tv.get("certificate").get("certificate")}' imdb_info['last_update'] = datetime.date.today().toordinal() # Rename dict keys without spaces for DB upsert self.imdb_info = dict( [(k.replace(' ', '_'), k(v) if hasattr(v, 'keys') else v) for k, v in iteritems(imdb_info)]) - logger.log('%s: Obtained info from IMDb -> %s' % (self.tvid_prodid, self._imdb_info), logger.DEBUG) + logger.debug('%s: Obtained info from IMDb -> %s' % (self.tvid_prodid, self._imdb_info)) logger.log('%s: Parsed latest IMDb show info for [%s]' % (self.tvid_prodid, self._name)) def next_episode(self): - logger.log('%s: Finding the episode which airs next for: %s' % (self.tvid_prodid, self._name), logger.DEBUG) + logger.debug('%s: Finding the episode which airs next for: %s' % (self.tvid_prodid, self._name)) cur_date = datetime.date.today().toordinal() if not self.nextaired or self.nextaired and cur_date > self.nextaired: @@ -3208,11 +3194,10 @@ class TVShow(TVShowBase): """, [self.tvid, self.prodid, datetime.date.today().toordinal(), UNAIRED, WANTED, FAILED]) if None is sql_result or 0 == len(sql_result): - logger.log('%s: No episode found... need to implement a show status' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: No episode found... 
need to implement a show status' % self.tvid_prodid) self.nextaired = '' else: - logger.log('%s: Found episode %sx%s' % ( - self.tvid_prodid, sql_result[0]['season'], sql_result[0]['episode']), logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: Found episode {sql_result[0]["season"]}x{sql_result[0]["episode"]}') self.nextaired = sql_result[0]['airdate'] return self.nextaired @@ -3298,20 +3283,20 @@ class TVShow(TVShowBase): file_attribute = os.stat(self.location)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable - logger.log('Attempting to make writeable the read only folder %s' % self._location, logger.DEBUG) + logger.debug('Attempting to make writeable the read only folder %s' % self._location) try: os.chmod(self.location, stat.S_IWRITE) except (BaseException, Exception): - logger.log('Unable to change permissions of %s' % self._location, logger.WARNING) + logger.warning('Unable to change permissions of %s' % self._location) result = helpers.remove_file(self.location, tree=True) if result: logger.log('%s show folder %s' % (result, self._location)) except exceptions_helper.ShowDirNotFoundException: - logger.log('Show folder does not exist, no need to %s %s' % (action, self._location), logger.WARNING) + logger.warning('Show folder does not exist, no need to %s %s' % (action, self._location)) except OSError as e: - logger.log('Unable to %s %s: %s / %s' % (action, self._location, repr(e), ex(e)), logger.WARNING) + logger.warning('Unable to %s %s: %s / %s' % (action, self._location, repr(e), ex(e))) def populate_cache(self, force=False): # type: (bool) -> None @@ -3359,8 +3344,8 @@ class TVShow(TVShowBase): try: ep_obj = self.get_episode(season, episode, ep_result=[cur_row]) except exceptions_helper.EpisodeDeletedException: - logger.log('The episode from [%s] was deleted while we were refreshing it, moving on to the next one' - % self._name, logger.DEBUG) + logger.debug(f'The episode from [{self._name}] was deleted while we were refreshing it,' + f' moving on to the next one') continue # if the path exist and if it's in our show dir @@ -3371,10 +3356,9 @@ class TVShow(TVShowBase): # locations repeat but attempt to delete once attempted += ep_obj.location if kept >= self.prune: - result = helpers.remove_file(ep_obj.location, prefix_failure=u'%s: ' % self.tvid_prodid) + result = helpers.remove_file(ep_obj.location, prefix_failure=f'{self.tvid_prodid}: ') if result: - logger.log(u'%s: %s file %s' % (self.tvid_prodid, result, ep_obj.location), - logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: {result} file {ep_obj.location}') deleted += 1 else: kept += 1 @@ -3394,10 +3378,9 @@ class TVShow(TVShowBase): else: ep_obj.status = (sickgear.SKIP_REMOVED_FILES, IGNORED)[ not sickgear.SKIP_REMOVED_FILES] - logger.log( - '%s: File no longer at location for s%02de%02d,' % (self.tvid_prodid, season, episode) - + ' episode removed and status changed to %s' % statusStrings[ep_obj.status], - logger.DEBUG) + logger.debug(f'{self.tvid_prodid}: File no longer at location for' + f' s{season:02d}e{episode:02d}, episode removed' + f' and status changed to {statusStrings[ep_obj.status]}') ep_obj.subtitles = list() ep_obj.subtitles_searchcount = 0 ep_obj.subtitles_lastsearch = str(datetime.datetime.min) @@ -3431,9 +3414,9 @@ class TVShow(TVShowBase): """ # TODO: Add support for force option if not os.path.isdir(self._location): - logger.log('%s: Show directory doesn\'t exist, can\'t download subtitles' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: Show directory doesn\'t 
exist, can\'t download subtitles' % self.tvid_prodid) return - logger.log('%s: Downloading subtitles' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: Downloading subtitles' % self.tvid_prodid) try: my_db = db.DBConnection() @@ -3449,7 +3432,7 @@ class TVShow(TVShowBase): ep_obj = self.ep_obj_from_file(cur_row['location']) _ = ep_obj.download_subtitles(force=force) except (BaseException, Exception): - logger.log('Error occurred when downloading subtitles: %s' % traceback.format_exc(), logger.ERROR) + logger.error('Error occurred when downloading subtitles: %s' % traceback.format_exc()) return def remove_character_images(self): @@ -3535,8 +3518,7 @@ class TVShow(TVShowBase): try: os.rename(old_dir, new_dir) except (BaseException, Exception) as e: - logger.log('Unable to rename %s to %s: %s / %s' % (old_dir, new_dir, repr(e), ex(e)), - logger.WARNING) + logger.warning('Unable to rename %s to %s: %s / %s' % (old_dir, new_dir, repr(e), ex(e))) old_id = TVidProdid({old_tvid: old_prodid})() rating = sickgear.FANART_RATINGS.get(old_id) @@ -3563,7 +3545,7 @@ class TVShow(TVShowBase): self, force=True, web=True, priority=QueuePriorities.VERYHIGH, pausestatus_after=pausestatus_after, switch_src=True) except exceptions_helper.CantUpdateException as e: - logger.log('Unable to update this show. %s' % ex(e), logger.ERROR) + logger.error('Unable to update this show. %s' % ex(e)) def save_to_db(self, force_save=False): # type: (bool) -> None @@ -3572,10 +3554,10 @@ class TVShow(TVShowBase): :param force_save: """ if not self.dirty and not force_save: - logger.log('%s: Not saving show to db - record is not dirty' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: Not saving show to db - record is not dirty' % self.tvid_prodid) return - logger.log('%s: Saving show info to database' % self.tvid_prodid, logger.DEBUG) + logger.debug('%s: Saving show info to database' % self.tvid_prodid) new_value_dict = dict( air_by_date=self._air_by_date, @@ -3672,8 +3654,8 @@ class TVShow(TVShowBase): :param multi_ep: multiple episodes :return: """ - logger.log('Checking if found %sepisode %sx%s is wanted at quality %s' % - (('', 'multi-part ')[multi_ep], season, episode, Quality.qualityStrings[quality]), logger.DEBUG) + logger.debug(f'Checking if found {("", "multi-part ")[multi_ep]}episode {season}x{episode}' + f' is wanted at quality {Quality.qualityStrings[quality]}') if not multi_ep: try: @@ -3682,19 +3664,19 @@ class TVShow(TVShowBase): if quality in wq: cur_status, cur_quality = Quality.split_composite_status(self.sxe_ep_obj[season][episode].status) if cur_status in (WANTED, UNAIRED, SKIPPED, FAILED): - logger.log('Existing episode status is wanted/unaired/skipped/failed,' - ' getting found episode', logger.DEBUG) + logger.debug('Existing episode status is wanted/unaired/skipped/failed,' + ' getting found episode') return True elif manual_search: - logger.log('Usually ignoring found episode, but forced search allows the quality,' - ' getting found episode', logger.DEBUG) + logger.debug('Usually ignoring found episode, but forced search allows the quality,' + ' getting found episode') return True elif quality > cur_quality: - logger.log( - 'Episode already exists but the found episode has better quality,' - ' getting found episode', logger.DEBUG) + logger.debug('Episode already exists but the found episode has better quality,' + ' getting found episode') return True - logger.log('None of the conditions were met, ignoring found episode', logger.DEBUG) + logger.debug('None of the conditions were met,' + ' 
ignoring found episode') return False except (BaseException, Exception): pass @@ -3707,10 +3689,11 @@ class TVShow(TVShowBase): if 0 < len(archive_qualities): initial = '+ upgrade to %s + (%s)'\ % (initial, ','.join([Quality.qualityStrings[qual] for qual in archive_qualities])) - logger.log('Want initial %s and found %s' % (initial, Quality.qualityStrings[quality]), logger.DEBUG) + logger.debug('Want initial %s and found %s' % (initial, Quality.qualityStrings[quality])) if quality not in all_qualities: - logger.log('Don\'t want this quality, ignoring found episode', logger.DEBUG) + logger.debug('Don\'t want this quality,' + ' ignoring found episode') return False my_db = db.DBConnection() @@ -3722,34 +3705,33 @@ class TVShow(TVShowBase): """, [self.tvid, self.prodid, season, episode]) if not sql_result or not len(sql_result): - logger.log('Unable to find a matching episode in database, ignoring found episode', logger.DEBUG) + logger.debug('Unable to find a matching episode in database,' + ' ignoring found episode') return False cur_status, cur_quality = Quality.split_composite_status(int(sql_result[0]['status'])) ep_status_text = statusStrings[cur_status] - logger.log('Existing episode status: %s (%s)' % (statusStrings[cur_status], ep_status_text), logger.DEBUG) + logger.debug('Existing episode status: %s (%s)' % (statusStrings[cur_status], ep_status_text)) # if we know we don't want it then just say no if cur_status in [IGNORED, ARCHIVED] + ([SKIPPED], [])[multi_ep] and not manual_search: - logger.log('Existing episode status is %signored/archived, ignoring found episode' % - ('skipped/', '')[multi_ep], logger.DEBUG) + logger.debug(f'Existing episode status is {("skipped/", "")[multi_ep]}ignored/archived,' + f' ignoring found episode') return False # if it's one of these then we want it as long as it's in our allowed initial qualities if quality in all_qualities: if cur_status in [WANTED, UNAIRED, SKIPPED, FAILED] + ([], SNATCHED_ANY)[multi_ep]: - logger.log('Existing episode status is wanted/unaired/skipped/failed, getting found episode', - logger.DEBUG) + logger.debug('Existing episode status is wanted/unaired/skipped/failed,' + ' getting found episode') return True elif manual_search: - logger.log( - 'Usually ignoring found episode, but forced search allows the quality, getting found episode', - logger.DEBUG) + logger.debug('Usually ignoring found episode, but forced search allows the quality,' + ' getting found episode') return True else: - logger.log('Quality is on wanted list, need to check if it\'s better than existing quality', - logger.DEBUG) + logger.debug('Quality is on wanted list, need to check if it\'s better than existing quality') downloaded_status_list = SNATCHED_ANY + [DOWNLOADED] # special case: already downloaded quality is not in any of the wanted Qualities @@ -3760,14 +3742,14 @@ class TVShow(TVShowBase): # if re-downloading then only keep items in the archiveQualities list and better than what we have if cur_status in downloaded_status_list and quality in wanted_qualities and quality > cur_quality: - logger.log('Episode already exists but the found episode has better quality, getting found episode', - logger.DEBUG) + logger.debug('Episode already exists but the found episode has better quality,' + ' getting found episode') return True else: - logger.log('Episode already exists and the found episode has same/lower quality, ignoring found episode', - logger.DEBUG) + logger.debug('Episode already exists and the found episode has same/lower quality,' + ' ignoring found 
episode') - logger.log('None of the conditions were met, ignoring found episode', logger.DEBUG) + logger.debug('None of the conditions were met, ignoring found episode') return False def get_overview(self, ep_status, split_snatch=False): @@ -3942,7 +3924,7 @@ class TVEpisode(TVEpisodeBase): def _set_location(self, val): log_vals = (('clears', ''), ('sets', ' to ' + val))[any(val)] # noinspection PyStringFormat - logger.log(u'Setter %s location%s' % log_vals, logger.DEBUG) + logger.debug('Setter %s location%s' % log_vals) # self._location = newLocation self.dirty_setter('_location')(self, val) @@ -3972,11 +3954,10 @@ class TVEpisode(TVEpisodeBase): # TODO: Add support for force option if not os.path.isfile(self.location): - logger.log('%s: Episode file doesn\'t exist, can\'t download subtitles for episode %sx%s' % - (self.show_obj.tvid_prodid, self.season, self.episode), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}: Episode file doesn\'t exist,' + f' can\'t download subtitles for episode {self.season}x{self.episode}') return - logger.log('%s: Downloading subtitles for episode %sx%s' - % (self.show_obj.tvid_prodid, self.season, self.episode), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}: Downloading subtitles for episode {self.season}x{self.episode}') previous_subtitles = self.subtitles @@ -3993,7 +3974,7 @@ class TVEpisode(TVEpisodeBase): subs_new_path = os.path.join(os.path.dirname(video.path), sickgear.SUBTITLES_DIR) dir_exists = helpers.make_dir(subs_new_path) if not dir_exists: - logger.log('Unable to create subtitles folder %s' % subs_new_path, logger.ERROR) + logger.error('Unable to create subtitles folder %s' % subs_new_path) else: helpers.chmod_as_parent(subs_new_path) @@ -4007,7 +3988,7 @@ class TVEpisode(TVEpisodeBase): helpers.chmod_as_parent(subtitle.path) except (BaseException, Exception): - logger.log('Error occurred when downloading subtitles: %s' % traceback.format_exc(), logger.ERROR) + logger.error('Error occurred when downloading subtitles: %s' % traceback.format_exc()) return self.refresh_subtitles() @@ -4022,17 +4003,17 @@ class TVEpisode(TVEpisodeBase): try: subtitle_list = ", ".join([subliminal.language.Language(x).name for x in newsubtitles]) except (BaseException, Exception): - logger.log('Could not parse a language to use to fetch subtitles for episode %sx%s' % - (self.season, self.episode), logger.DEBUG) + logger.debug(f'Could not parse a language to use to fetch subtitles' + f' for episode {self.season}x{self.episode}') return - logger.log('%s: Downloaded %s subtitles for episode %sx%s' % - (self.show_obj.tvid_prodid, subtitle_list, self.season, self.episode), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}: Downloaded {subtitle_list} subtitles' + f' for episode {self.season}x{self.episode}') notifiers.notify_subtitle_download(self, subtitle_list) else: - logger.log('%s: No subtitles downloaded for episode %sx%s' - % (self.show_obj.tvid_prodid, self.season, self.episode), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}: No subtitles downloaded' + f' for episode {self.season}x{self.episode}') if sickgear.SUBTITLES_HISTORY: for video in subs: @@ -4092,8 +4073,8 @@ class TVEpisode(TVEpisodeBase): try: self.load_from_nfo(self.location) except exceptions_helper.NoNFOException: - logger.log('%s: There was an error loading the NFO for episode %sx%s' % - (self.show_obj.tvid_prodid, season, episode), logger.ERROR) + logger.error(f'{self.show_obj.tvid_prodid}: There was an error loading the NFO' + f' for episode 
{season}x{episode}') pass # if we tried loading it from NFO and didn't find the NFO, try the Indexers @@ -4118,8 +4099,7 @@ class TVEpisode(TVEpisodeBase): :param episode: episode number :param show_result: """ - logger.log('%s: Loading episode details from DB for episode %sx%s' - % (self._show_obj.tvid_prodid, season, episode), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}: Loading episode details from DB for episode {season}x{episode}') show_result = show_result and next(iter(show_result), None) if not show_result or episode != show_result['episode'] or season != show_result['season']: @@ -4136,8 +4116,8 @@ class TVEpisode(TVEpisodeBase): if len(sql_result): raise exceptions_helper.MultipleDBEpisodesException('DB has multiple records for the same show') - logger.log('%s: Episode %sx%s not found in the database' - % (self._show_obj.tvid_prodid, self._season, self._episode), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}: Episode {self._season}x{self._episode}' + f' not found in the database') return False show_result = next(iter(sql_result)) @@ -4259,9 +4239,8 @@ class TVEpisode(TVEpisodeBase): if None is episode: episode = self._episode - logger.log('%s: Loading episode details from %s for episode %sx%s' % - (self._show_obj.tvid_prodid, sickgear.TVInfoAPI(self._show_obj.tvid).name, season, episode), - logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}: Loading episode details from' + f' {sickgear.TVInfoAPI(self._show_obj.tvid).name} for episode {season}x{episode}') try: if cached_show: @@ -4290,35 +4269,34 @@ class TVEpisode(TVEpisodeBase): ep_info = cached_season[episode] # type: TVInfoEpisode except (BaseTVinfoEpisodenotfound, BaseTVinfoSeasonnotfound): - logger.log('Unable to find the episode on %s... has it been removed? Should I delete from db?' % - sickgear.TVInfoAPI(self.tvid).name, logger.DEBUG) - # if I'm no longer on the Indexers, but I once was then delete myself from the DB + logger.debug(f'Unable to find the episode on {sickgear.TVInfoAPI(self.tvid).name}...' + f' has it been removed? 
Should it be deleted from the db?') + # if no longer on the Indexers, but once was, then delete it from the DB if -1 != self._epid and helpers.should_delete_episode(self._status): self.delete_episode() elif UNKNOWN == self._status: self.status = SKIPPED return except (BaseTVinfoError, IOError) as e: - logger.log('%s threw up an error: %s' % (sickgear.TVInfoAPI(self.tvid).name, ex(e)), logger.DEBUG) + logger.debug('%s threw up an error: %s' % (sickgear.TVInfoAPI(self.tvid).name, ex(e))) # if the episode is already valid just log it, if not throw it up if UNKNOWN == self._status: self.status = SKIPPED if self._name: - logger.log('%s timed out but there is enough info from other sources, allowing the error' % - sickgear.TVInfoAPI(self.tvid).name, logger.DEBUG) + logger.debug(f'{sickgear.TVInfoAPI(self.tvid).name}' + f' timed out but there is enough info from other sources, allowing the error') return - logger.log('%s timed out, unable to create the episode' % sickgear.TVInfoAPI(self.tvid).name, - logger.ERROR) + logger.error('%s timed out, unable to create the episode' % sickgear.TVInfoAPI(self.tvid).name) return False if getattr(ep_info, 'absolute_number', None) in (None, ''): logger.debug('This episode (%s - %sx%s) has no absolute number on %s' % (self.show_obj.unique_name, season, episode, sickgear.TVInfoAPI(self.tvid).name)) else: - logger.log('%s: The absolute_number for %sx%s is : %s' % - (self._show_obj.tvid_prodid, season, episode, ep_info['absolute_number']), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}:' + f' The absolute_number for {season}x{episode} is : {ep_info["absolute_number"]}') self.absolute_number = int(ep_info['absolute_number']) if switch and None is not switch_list: @@ -4407,7 +4385,7 @@ class TVEpisode(TVEpisodeBase): # early conversion to int so that episode doesn't get marked dirty self.epid = getattr(ep_info, 'id', None) if None is self._epid: - logger.log('Failed to retrieve ID from %s' % sickgear.TVInfoAPI(self.tvid).name, logger.ERROR) + logger.error('Failed to retrieve ID from %s' % sickgear.TVInfoAPI(self.tvid).name) if helpers.should_delete_episode(self._status): self.delete_episode() elif UNKNOWN == self._status: @@ -4427,9 +4405,8 @@ class TVEpisode(TVEpisodeBase): return if self._location: - logger.log('%s: Setting status for %sx%s based on status %s and existence of %s' % - (self._show_obj.tvid_prodid, season, episode, statusStrings[self._status], self._location), - logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}: Setting status for {season}x{episode}' + f' based on status {statusStrings[self._status]} and existence of {self._location}') # if we don't have the file if not os.path.isfile(self._location): @@ -4474,24 +4451,24 @@ class TVEpisode(TVEpisodeBase): else: msg = 'Not touching episode status %s, because there is no file' - logger.log(msg % statusStrings[self._status], logger.DEBUG) + logger.debug(msg % statusStrings[self._status]) # if we have a media file then it's downloaded elif sickgear.helpers.has_media_ext(self._location): if IGNORED == self._status: - logger.log('File exists for %sx%s, ignoring because of status %s' % - (self._season, self._episode, statusStrings[self._status]), logger.DEBUG) + logger.debug(f'File exists for {self._season}x{self._episode},' + f' ignoring because of status {statusStrings[self._status]}') # leave propers alone, you have to either post-process them or manually change them back elif self._status not in Quality.SNATCHED_ANY + Quality.DOWNLOADED + Quality.ARCHIVED: msg = '(1) Status 
changes from %s to ' % statusStrings[self._status] self.status = Quality.status_from_name_or_file(self._location, anime=self._show_obj.is_anime) - logger.log('%s%s' % (msg, statusStrings[self._status]), logger.DEBUG) + logger.debug('%s%s' % (msg, statusStrings[self._status])) # shouldn't get here probably else: msg = '(2) Status changes from %s to ' % statusStrings[self._status] self.status = UNKNOWN - logger.log('%s%s' % (msg, statusStrings[self._status]), logger.DEBUG) + logger.debug('%s%s' % (msg, statusStrings[self._status])) def load_from_nfo(self, location): """ @@ -4505,8 +4482,8 @@ class TVEpisode(TVEpisodeBase): % self._show_obj.tvid_prodid) return - logger.log('%s: Loading episode details from the NFO file associated with %s' - % (self.show_obj.tvid_prodid, location), logger.DEBUG) + logger.debug(f'{self.show_obj.tvid_prodid}' + f': Loading episode details from the NFO file associated with {location}') self.location = location @@ -4514,24 +4491,22 @@ class TVEpisode(TVEpisodeBase): if UNKNOWN == self._status and sickgear.helpers.has_media_ext(self.location): status_quality = Quality.status_from_name_or_file(self.location, anime=self._show_obj.is_anime) - logger.log('(3) Status changes from %s to %s' % (self._status, status_quality), logger.DEBUG) + logger.debug('(3) Status changes from %s to %s' % (self._status, status_quality)) self.status = status_quality nfo_file = sickgear.helpers.replace_extension(self.location, 'nfo') - logger.log('%s: Using NFO name %s' % (self._show_obj.tvid_prodid, nfo_file), logger.DEBUG) + logger.debug('%s: Using NFO name %s' % (self._show_obj.tvid_prodid, nfo_file)) if os.path.isfile(nfo_file): try: show_xml = etree.ElementTree(file=nfo_file) except (SyntaxError, ValueError) as e: - logger.log('Error loading the NFO, backing up the NFO and skipping for now: %s' % ex(e), - logger.ERROR) # TODO: figure out what's wrong and fix it + # TODO: figure out what's wrong and fix it + logger.error('Error loading the NFO, backing up the NFO and skipping for now: %s' % ex(e)) try: os.rename(nfo_file, '%s.old' % nfo_file) except (BaseException, Exception) as e: - logger.log( - 'Failed to rename your episode\'s NFO file - you need to delete it or fix it: %s' % ex(e), - logger.ERROR) + logger.error(f'Failed to rename episode\'s NFO file - you need to delete it or fix it: {ex(e)}') raise exceptions_helper.NoNFOException('Error in NFO format') # TODO: deprecated function getiterator needs to be replaced @@ -4540,10 +4515,10 @@ class TVEpisode(TVEpisodeBase): if None is epDetails.findtext('season') or int(epDetails.findtext('season')) != self._season or \ None is epDetails.findtext('episode') or int( epDetails.findtext('episode')) != self._episode: - logger.log('%s: NFO has an block for a different episode - wanted %sx%s' - ' but got %sx%s' % - (self._show_obj.tvid_prodid, self._season, self._episode, - epDetails.findtext('season'), epDetails.findtext('episode')), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}' + f': NFO has an block for a different episode - wanted' + f' {self._season}x{self._episode}' + f' but got {epDetails.findtext("season")}x{epDetails.findtext("episode")}') continue if None is epDetails.findtext('title') or None is epDetails.findtext('aired'): @@ -4664,11 +4639,11 @@ class TVEpisode(TVEpisodeBase): # remove myself from the show dictionary if self.show_obj.get_episode(self._season, self._episode, no_create=True) == self: - logger.log('Removing myself from my show\'s list', logger.DEBUG) + logger.debug('Removing myself from my show\'s 
list') del self.show_obj.sxe_ep_obj[self._season][self._episode] # delete myself from the DB - logger.log('Deleting myself from the database', logger.DEBUG) + logger.debug('Deleting myself from the database') sql = [['DELETE FROM tv_episodes WHERE indexer = ? AND showid = ? AND season = ? AND episode = ?', [self._show_obj.tvid, self._show_obj.prodid, self._season, self._episode]]] @@ -4690,7 +4665,7 @@ class TVEpisode(TVEpisodeBase): """ if not self.dirty and not force_save: - logger.log('%s: Not creating SQL queue - record is not dirty' % self._show_obj.tvid_prodid, logger.DEBUG) + logger.debug('%s: Not creating SQL queue - record is not dirty' % self._show_obj.tvid_prodid) return self.dirty = False @@ -4750,12 +4725,12 @@ class TVEpisode(TVEpisodeBase): """ if not self.dirty and not force_save: - logger.log('%s: Not saving episode to db - record is not dirty' % self._show_obj.tvid_prodid, logger.DEBUG) + logger.debug('%s: Not saving episode to db - record is not dirty' % self._show_obj.tvid_prodid) return - logger.log('%s: Saving episode details to database' % self._show_obj.tvid_prodid, logger.DEBUG) + logger.debug('%s: Saving episode details to database' % self._show_obj.tvid_prodid) - logger.log('STATUS IS %s' % statusStrings[self._status], logger.DEBUG) + logger.debug('STATUS IS %s' % statusStrings[self._status]) new_value_dict = dict( absolute_number=self._absolute_number, @@ -4908,7 +4883,7 @@ class TVEpisode(TVEpisodeBase): np = NameParser(name, show_obj=show_obj, naming_pattern=True) parse_result = np.parse(name) except (InvalidNameException, InvalidShowException) as e: - logger.log('Unable to get parse release_group: %s' % ex(e), logger.DEBUG) + logger.debug('Unable to parse release_group: %s' % ex(e)) return '' if not parse_result.release_group: @@ -5005,7 +4980,7 @@ class TVEpisode(TVEpisodeBase): result_name = result_name.replace('%RG', 'SickGear') result_name = result_name.replace('%rg', 'SickGear') - logger.log('Episode has no release name, replacing it with a generic one: %s' % result_name, logger.DEBUG) + logger.debug('Episode has no release name, replacing it with a generic one: %s' % result_name) if not replace_map['%RT']: result_name = re.sub('([ _.-]*)%RT([ _.-]*)', r'\2', result_name) @@ -5113,14 +5088,14 @@ class TVEpisode(TVEpisodeBase): # fill out the template for this piece and then insert this piece into the actual pattern cur_name_group_result = re.sub('(?i)(?x)' + regex_used, regex_replacement, cur_name_group) # cur_name_group_result = cur_name_group.replace(ep_format, ep_string) - # logger.log(u"found "+ep_format+" as the ep pattern using "+regex_used+" + # logger.debug("found "+ep_format+" as the ep pattern using "+regex_used+" # and replaced it with "+regex_replacement+" to result in "+cur_name_group_result+" - # from "+cur_name_group, logger.DEBUG) + # from "+cur_name_group) result_name = result_name.replace(cur_name_group, cur_name_group_result) result_name = self._format_string(result_name, replace_map) - logger.log('formatting pattern: %s -> %s' % (pattern, result_name), logger.DEBUG) + logger.debug('formatting pattern: %s -> %s' % (pattern, result_name)) return result_name @@ -5196,7 +5171,7 @@ class TVEpisode(TVEpisodeBase): """ if not os.path.isfile(self.location): - logger.log('Can\'t perform rename on %s when it doesn\'t exist, skipping' % self.location, logger.WARNING) + logger.warning('Can\'t perform rename on %s when it doesn\'t exist, skipping' % self.location) return proper_path = self.proper_path() @@ -5211,13 +5186,11 @@ class
TVEpisode(TVEpisodeBase): if absolute_current_path_no_ext.startswith(self._show_obj.location): current_path = absolute_current_path_no_ext[len(self._show_obj.location):] - logger.log('Renaming/moving episode from the base path %s to %s' % (self.location, absolute_proper_path), - logger.DEBUG) + logger.debug('Renaming/moving episode from the base path %s to %s' % (self.location, absolute_proper_path)) # if it's already named correctly then don't do anything if proper_path == current_path: - logger.log('%s: File %s is already named correctly, skipping' % (self._epid, self.location), - logger.DEBUG) + logger.debug('%s: File %s is already named correctly, skipping' % (self._epid, self.location)) return related_files = postProcessor.PostProcessor(self.location).list_associated_files( @@ -5228,7 +5201,7 @@ class TVEpisode(TVEpisodeBase): subtitles_only=True) # absolute_proper_subs_path = os.path.join(sickgear.SUBTITLES_DIR, self.formatted_filename()) - logger.log('Files associated to %s: %s' % (self.location, related_files), logger.DEBUG) + logger.debug('Files associated to %s: %s' % (self.location, related_files)) # move the ep file result = helpers.rename_ep_file(self.location, absolute_proper_path, absolute_current_path_no_ext_length) @@ -5238,14 +5211,14 @@ class TVEpisode(TVEpisodeBase): renamed = helpers.rename_ep_file(cur_related_file, absolute_proper_path, absolute_current_path_no_ext_length) if not renamed: - logger.log('%s: Unable to rename file %s' % (self._epid, cur_related_file), logger.ERROR) + logger.error('%s: Unable to rename file %s' % (self._epid, cur_related_file)) for cur_related_sub in related_subs: absolute_proper_subs_path = os.path.join(sickgear.SUBTITLES_DIR, self.formatted_filename()) renamed = helpers.rename_ep_file(cur_related_sub, absolute_proper_subs_path, absolute_current_path_no_ext_length) if not renamed: - logger.log('%s: Unable to rename file %s' % (self._epid, cur_related_sub), logger.ERROR) + logger.error('%s: Unable to rename file %s' % (self._epid, cur_related_sub)) # save the ep with self.lock: @@ -5278,8 +5251,9 @@ class TVEpisode(TVEpisodeBase): """ has_timestamp = isinstance(self._timestamp, int) and 0 != self._timestamp if not has_timestamp and (not isinstance(self._airdate, datetime.date) or 1 == self._airdate.year): - logger.log('%s: Did not change modify date of %s because episode date is never aired or invalid' - % (self._show_obj.tvid_prodid, os.path.basename(self.location)), logger.DEBUG) + logger.debug(f'{self._show_obj.tvid_prodid}' + f': Did not change modify date of {os.path.basename(self.location)}' + f' because episode date is never aired or invalid') return aired_dt = None diff --git a/sickgear/tv_base.py b/sickgear/tv_base.py index d00dc284..0d4c8f45 100644 --- a/sickgear/tv_base.py +++ b/sickgear/tv_base.py @@ -42,8 +42,8 @@ class TVBase(object): setattr(self, attr_name, val) self.dirty = True else: - logger.log('Didn\'t change property "%s" because expected: %s, but got: %s with value: %s' % - (attr_name, types, type(val), val), logger.WARNING) + logger.warning(f'Didn\'t change property "{attr_name}" because expected: {types},' + f' but got: {type(val)} with value: {val}') return wrapper diff --git a/sickgear/tvcache.py b/sickgear/tvcache.py index cdcb4b8a..3bab265e 100644 --- a/sickgear/tvcache.py +++ b/sickgear/tvcache.py @@ -90,7 +90,7 @@ class TVCache(object): try: self.check_auth() except AuthException as e: - logger.log(u'Authentication error: ' + ex(e), logger.ERROR) + logger.error(f'Authentication error: {ex(e)}') return [] if 
self.should_update(): @@ -130,7 +130,7 @@ class TVCache(object): :return: :rtype: AnyStr """ - return u'' + title.replace(' ', '.') + return f'{title.replace(" ", ".")}' @staticmethod def _translate_link_url(url): @@ -159,8 +159,7 @@ class TVCache(object): return self.add_cache_entry(title, url) - logger.log('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name, - logger.DEBUG) + logger.debug('Data returned from the %s feed is incomplete, this result is unusable' % self.provider.name) def _get_last_update(self): """ @@ -276,7 +275,7 @@ class TVCache(object): parser = NameParser(show_obj=show_obj, convert=True, indexer_lookup=False) parse_result = parser.parse(name) except InvalidNameException: - logger.log('Unable to parse the filename %s into a valid episode' % name, logger.DEBUG) + logger.debug('Unable to parse the filename %s into a valid episode' % name) return except InvalidShowException: return @@ -312,7 +311,7 @@ class TVCache(object): # get version version = parse_result.version - logger.log('Add to cache: [%s]' % name, logger.DEBUG) + logger.debug('Add to cache: [%s]' % name) return [ 'INSERT OR IGNORE INTO provider_cache' @@ -406,7 +405,7 @@ class TVCache(object): # skip if provider is anime only and show is not anime if self.provider.anime_only and not show_obj.is_anime: - logger.debug(u'%s is not an anime, skipping' % show_obj.unique_name) + logger.debug(f'{show_obj.unique_name} is not an anime, skipping') continue # get season and ep data (ignoring multi-eps for now) @@ -424,8 +423,8 @@ class TVCache(object): # if the show says we want that episode then add it to the list if not show_obj.want_episode(season, ep_obj_list, quality, manual_search): - logger.log(u'Skipping ' + cur_result['name'] + ' because we don\'t want an episode that\'s ' + - Quality.qualityStrings[quality], logger.DEBUG) + logger.debug(f"Skipping {cur_result['name']}" + f" because we don't want an episode that's {Quality.qualityStrings[quality]}") continue ep_obj = show_obj.get_episode(season, ep_obj_list) @@ -434,7 +433,7 @@ class TVCache(object): title = cur_result['name'] url = cur_result['url'] - logger.log(u'Found result ' + title + ' at ' + url) + logger.log(f'Found result {title} at {url}') result = self.provider.get_result([ep_obj], url) if None is result: diff --git a/sickgear/version_checker.py b/sickgear/version_checker.py index 7da64b0e..aee6ccd1 100644 --- a/sickgear/version_checker.py +++ b/sickgear/version_checker.py @@ -277,17 +277,17 @@ class GitUpdateManager(UpdateManager): def _find_working_git(self): - logger.debug(u'Checking if git commands are available') + logger.debug('Checking if git commands are available') main_git = (sickgear.GIT_PATH, 'git')[not sickgear.GIT_PATH] _, _, exit_status = self._git_version(main_git) if 0 == exit_status: - logger.debug(u'Using: %s' % main_git) + logger.debug(f'Using: {main_git}') return main_git - logger.debug(u'Git not found: %s' % main_git) + logger.debug(f'Git not found: {main_git}') # trying alternatives @@ -301,12 +301,12 @@ class GitUpdateManager(UpdateManager): if main_git != main_git.lower(): alt_git_paths.append(main_git.lower()) if sickgear.GIT_PATH: - logger.debug(u'git.exe is missing, remove `git_path` from config.ini: %s' % main_git) + logger.debug(f'git.exe is missing, remove `git_path` from config.ini: {main_git}') if re.search(r' \(x86\)', main_git): alt_git_paths.append(re.sub(r' \(x86\)', '', main_git)) else: alt_git_paths.append(re.sub('Program Files', 'Program Files (x86)', main_git)) - 
logger.debug(u'Until `git_path` is removed by a config.ini edit, trying: %s' % alt_git_paths[-1]) + logger.debug(f'Until `git_path` is removed by a config.ini edit, trying: {alt_git_paths[-1]}') if alt_git_paths: logger.debug('Trying known alternative git locations') @@ -315,9 +315,9 @@ class GitUpdateManager(UpdateManager): _, _, exit_status = self._git_version(cur_git_path) if 0 == exit_status: - logger.debug(u'Using: %s' % cur_git_path) + logger.debug(f'Using: {cur_git_path}') return cur_git_path - logger.debug(u'Not using: %s' % cur_git_path) + logger.debug(f'Not using: {cur_git_path}') # Still haven't found a working git error_message = 'Unable to find your git executable - Shutdown SickGear and EITHER set git_path' \ @@ -337,15 +337,15 @@ class GitUpdateManager(UpdateManager): git_path = self._git_path if not git_path: - logger.error(u'No git specified, cannot use git commands') + logger.error('No git specified, cannot use git commands') return output, err, exit_status cmd = ' '.join([git_path] + arg_list) try: - logger.debug(u'Executing %s with your shell in %s' % (cmd, sickgear.PROG_DIR)) + logger.debug(f'Executing {cmd} with your shell in {sickgear.PROG_DIR}') output, err, exit_status = cmdline_runner([git_path] + arg_list, env={'LANG': 'en_US.UTF-8'}) - logger.debug(u'git output: %s' % output) + logger.debug(f'git output: {output}') except OSError: logger.log('Failed command: %s' % cmd) @@ -354,12 +354,12 @@ class GitUpdateManager(UpdateManager): logger.log('Failed command: %s, %s' % (cmd, ex(e))) if 0 == exit_status: - logger.debug(u'Successful return: %s' % cmd) + logger.debug(f'Successful return: {cmd}') exit_status = 0 self.unsafe = False elif 1 == exit_status: - logger.error(u'Failed: %s returned: %s' % (cmd, output)) + logger.error(f'Failed: {cmd} returned: {output}') elif 128 == exit_status or 'fatal:' in output or err: if 'unsafe repository' not in output and 'fatal:' in output: @@ -382,14 +382,14 @@ class GitUpdateManager(UpdateManager): except (BaseException, Exception): pass exit_status = 128 - msg = u'Fatal: %s returned: %s' % (cmd, output) + msg = f'Fatal: {cmd} returned: {output}' if 'develop' in output.lower() or 'main' in output.lower(): logger.error(msg) else: logger.debug(msg) else: - logger.error(u'Treat as error for now, command: %s returned: %s' % (cmd, output)) + logger.error(f'Treat as error for now, command: {cmd} returned: {output}') return output, err, exit_status @@ -405,7 +405,7 @@ class GitUpdateManager(UpdateManager): if 0 == exit_status and output: cur_commit_hash = output.strip() if not re.match(r'^[a-z0-9]+$', cur_commit_hash): - logger.error(u'Output doesn\'t look like a hash, not using it') + logger.error("Output doesn't look like a hash, not using it") return False self._cur_commit_hash = cur_commit_hash sickgear.CUR_COMMIT_HASH = str(cur_commit_hash) @@ -434,7 +434,7 @@ class GitUpdateManager(UpdateManager): _, _, exit_status = self._run_git(['fetch', '%s' % sickgear.GIT_REMOTE]) if 0 != exit_status: - logger.error(u'Unable to contact github, can\'t check for update') + logger.error("Unable to contact github, can't check for update") return if not self._cur_pr_number: @@ -446,14 +446,14 @@ class GitUpdateManager(UpdateManager): cur_commit_hash = output.strip() if not re.match('^[a-z0-9]+$', cur_commit_hash): - logger.debug(u'Output doesn\'t look like a hash, not using it') + logger.debug("Output doesn't look like a hash, not using it") return self._newest_commit_hash = cur_commit_hash self._old_commit_hash = cur_commit_hash self._old_branch = 
self._find_installed_branch() else: - logger.debug(u'git didn\'t return newest commit hash') + logger.debug("git didn't return newest commit hash") return # get number of commits behind and ahead (option --count not supported git < 1.7.2) @@ -466,11 +466,13 @@ class GitUpdateManager(UpdateManager): self._num_commits_ahead = int(output.count('>')) except (BaseException, Exception): - logger.debug(u'git didn\'t return numbers for behind and ahead, not using it') + logger.debug("git didn't return numbers for behind and ahead, not using it") return - logger.debug(u'cur_commit = %s, newest_commit = %s, num_commits_behind = %s, num_commits_ahead = %s' % ( - self._cur_commit_hash, self._newest_commit_hash, self._num_commits_behind, self._num_commits_ahead)) + logger.debug(f'cur_commit = {self._cur_commit_hash}' + f', newest_commit = {self._newest_commit_hash}' + f', num_commits_behind = {self._num_commits_behind}' + f', num_commits_ahead = {self._num_commits_ahead}') else: # we need to treat pull requests specially as it doesn't seem possible to set their "@{upstream}" tag output, _, _ = self._run_git(['ls-remote', '%s' % sickgear.GIT_REMOTE, @@ -512,7 +514,7 @@ class GitUpdateManager(UpdateManager): installed_branch = self._find_installed_branch() if self.branch != installed_branch: - logger.debug(u'Branch checkout: %s->%s' % (installed_branch, self.branch)) + logger.debug(f'Branch checkout: {installed_branch}->{self.branch}') return True self._find_installed_version() @@ -524,7 +526,7 @@ class GitUpdateManager(UpdateManager): try: self._check_github_for_update() except (BaseException, Exception) as e: - logger.error(u'Unable to contact github, can\'t check for update: %r' % e) + logger.error(f"Unable to contact github, can't check for update: {e!r}") return False if 0 < self._num_commits_behind: @@ -661,12 +663,12 @@ class SourceUpdateManager(UpdateManager): try: self._check_github_for_update() except (BaseException, Exception) as e: - logger.error(u'Unable to contact github, can\'t check for update: %r' % e) + logger.error(f"Unable to contact github, can't check for update: {e!r}") return False installed_branch = self._find_installed_branch() if self.branch != installed_branch: - logger.debug(u'Branch checkout: %s->%s' % (installed_branch, self.branch)) + logger.debug(f'Branch checkout: {installed_branch}->{self.branch}') return True if not self._cur_commit_hash or 0 < self._num_commits_behind: @@ -712,8 +714,9 @@ class SourceUpdateManager(UpdateManager): # when _cur_commit_hash doesn't match anything _num_commits_behind == 100 self._num_commits_behind += 1 - logger.debug(u'cur_commit = %s, newest_commit = %s, num_commits_behind = %s' - % (self._cur_commit_hash, self._newest_commit_hash, self._num_commits_behind)) + logger.debug(f'cur_commit = {self._cur_commit_hash}' + f', newest_commit = {self._newest_commit_hash}' + f', num_commits_behind = {self._num_commits_behind}') def set_newest_text(self): @@ -721,7 +724,7 @@ class SourceUpdateManager(UpdateManager): newest_text = None if not self._cur_commit_hash: - logger.debug(u'Unknown current version number, don\'t know if we should update or not') + logger.debug("Unknown current version number, don't know if we should update or not") newest_text = 'Unknown current version number: If you\'ve never used the SickGear upgrade system' \ ' before then current version is not set. 
— Update Now' \ @@ -751,48 +754,48 @@ class SourceUpdateManager(UpdateManager): try: # prepare the update dir - sg_update_dir = os.path.join(sickgear.PROG_DIR, u'sg-update') + sg_update_dir = os.path.join(sickgear.PROG_DIR, 'sg-update') if os.path.isdir(sg_update_dir): - logger.log(u'Clearing out update folder %s before extracting' % sg_update_dir) + logger.log(f'Clearing out update folder {sg_update_dir} before extracting') shutil.rmtree(sg_update_dir) - logger.log(u'Creating update folder %s before extracting' % sg_update_dir) + logger.log(f'Creating update folder {sg_update_dir} before extracting') os.makedirs(sg_update_dir) # retrieve file - logger.log(u'Downloading update from %r' % tar_download_url) - tar_download_path = os.path.join(sg_update_dir, u'sg-update.tar') + logger.log(f'Downloading update from {tar_download_url!r}') + tar_download_path = os.path.join(sg_update_dir, 'sg-update.tar') urllib.request.urlretrieve(tar_download_url, tar_download_path) if not os.path.isfile(tar_download_path): - logger.error(u'Unable to retrieve new version from %s, can\'t update' % tar_download_url) + logger.error(f"Unable to retrieve new version from {tar_download_url}, can't update") return False if not tarfile.is_tarfile(tar_download_path): - logger.error(u'Retrieved version from %s is corrupt, can\'t update' % tar_download_url) + logger.error(f"Retrieved version from {tar_download_url} is corrupt, can't update") return False # extract to sg-update dir - logger.log(u'Extracting file %s' % tar_download_path) + logger.log(f'Extracting file {tar_download_path}') tar = tarfile.open(tar_download_path) tar.extractall(sg_update_dir) tar.close() # delete .tar.gz - logger.log(u'Deleting file %s' % tar_download_path) + logger.log(f'Deleting file {tar_download_path}') os.remove(tar_download_path) # find update dir name update_dir_contents = [x for x in os.listdir(sg_update_dir) if os.path.isdir(os.path.join(sg_update_dir, x))] if 1 != len(update_dir_contents): - logger.error(u'Invalid update data, update failed: %s' % update_dir_contents) + logger.error(f'Invalid update data, update failed: {update_dir_contents}') return False content_dir = os.path.join(sg_update_dir, update_dir_contents[0]) # walk temp folder and move files to main folder - logger.log(u'Moving files from %s to %s' % (content_dir, sickgear.PROG_DIR)) + logger.log(f'Moving files from {content_dir} to {sickgear.PROG_DIR}') for dirname, dirnames, filenames in os.walk(content_dir): dirname = dirname[len(content_dir) + 1:] for curfile in filenames: @@ -808,7 +811,7 @@ class SourceUpdateManager(UpdateManager): os.remove(new_path) os.renames(old_path, new_path) except (BaseException, Exception) as e: - logger.debug(u'Unable to update %s: %s' % (new_path, ex(e))) + logger.debug(f'Unable to update {new_path}: {ex(e)}') os.remove(old_path) # Trash the updated file without moving in new path continue @@ -820,8 +823,8 @@ class SourceUpdateManager(UpdateManager): sickgear.CUR_COMMIT_BRANCH = self.branch except (BaseException, Exception) as e: - logger.error(u'Error while trying to update: %s' % ex(e)) - logger.debug(u'Traceback: %s' % traceback.format_exc()) + logger.error(f'Error while trying to update: {ex(e)}') + logger.debug(f'Traceback: {traceback.format_exc()}') return False # Notify update successful diff --git a/sickgear/watchedstate_queue.py b/sickgear/watchedstate_queue.py index 81c8d614..c7449506 100644 --- a/sickgear/watchedstate_queue.py +++ b/sickgear/watchedstate_queue.py @@ -56,7 +56,7 @@ class 
WatchedStateQueue(generic_queue.GenericQueue): # plex watched state item generic_queue.GenericQueue.add_item(self, item) else: - logger.log(u'Not adding item, it\'s already in the queue', logger.DEBUG) + logger.debug("Not adding item, it's already in the queue") class EmbyWatchedStateQueueItem(generic_queue.QueueItem): diff --git a/sickgear/webapi.py b/sickgear/webapi.py index b91e1625..f75c426a 100644 --- a/sickgear/webapi.py +++ b/sickgear/webapi.py @@ -253,7 +253,7 @@ class Api(webserve.BaseHandler): result = function(*ag) return result except Exception as e: - logger.log(ex(e), logger.ERROR) + logger.error(ex(e)) raise e def _out_as_json(self, dict): @@ -277,17 +277,17 @@ class Api(webserve.BaseHandler): self.apikey_name = '' if not sickgear.USE_API: - msg = u'%s - SB API Disabled. ACCESS DENIED' % remoteIp + msg = f'{remoteIp} - SB API Disabled. ACCESS DENIED' return False, msg, args, kwargs if not apiKey: - msg = u'%s - gave NO API KEY. ACCESS DENIED' % remoteIp + msg = f'{remoteIp} - gave NO API KEY. ACCESS DENIED' return False, msg, args, kwargs for realKey in realKeys: if apiKey == realKey[1]: self.apikey_name = realKey[0] - msg = u'%s - gave correct API KEY: %s. ACCESS GRANTED' % (remoteIp, realKey[0]) + msg = f'{remoteIp} - gave correct API KEY: {realKey[0]}. ACCESS GRANTED' return True, msg, args, kwargs - msg = u'%s - gave WRONG API KEY %s. ACCESS DENIED' % (remoteIp, apiKey) + msg = f'{remoteIp} - gave WRONG API KEY {apiKey}. ACCESS DENIED' return False, msg, args, kwargs @@ -306,10 +306,10 @@ def call_dispatcher(handler, args, kwargs): cmds = kwargs["cmd"] del kwargs["cmd"] - api_log(handler, u"cmd: '" + str(cmds) + "'", logger.DEBUG) - api_log(handler, u"all args: '" + str(args) + "'", logger.DEBUG) - api_log(handler, u"all kwargs: '" + str(kwargs) + "'", logger.DEBUG) - # logger.log(u"dateFormat: '" + str(dateFormat) + "'", logger.DEBUG) + api_log(handler, f'cmd: "{cmds}"', logger.DEBUG) + api_log(handler, f'all args: "{args}"', logger.DEBUG) + api_log(handler, f'all kwargs: "{kwargs}"', logger.DEBUG) + # logger.debug(f'dateFormat: "{dateFormat}"') outDict = {} @@ -626,14 +626,11 @@ class ApiCall(object): elif "ignore" == type: pass else: - self.log(u"Invalid param type set " + str(type) + " can not check or convert ignoring it", - logger.ERROR) + self.log(f"Invalid param type set {type}, cannot check or convert, ignoring it", logger.ERROR) if error: # this is a real ApiError !!
- raise ApiError( - u"param: '" + str(name) + "' with given value: '" + str(value) + "' could not be parsed into '" + str( - type) + "'") + raise ApiError(f'param: "{name}" with given value: "{value}" could not be parsed into "{type}"') return value @@ -654,8 +651,7 @@ class ApiCall(object): if error: # this is kinda a ApiError but raising an error is the only way of quitting here - raise ApiError(u"param: '" + str(name) + "' with given value: '" + str( - value) + "' is out of allowed range '" + str(allowedValues) + "'") + raise ApiError(f'param: "{name}" with given value: "{value}" is out of allowed range "{allowedValues}"') class TVDBShorthandWrapper(ApiCall): @@ -1369,8 +1365,8 @@ class CMD_SickGearEpisodeSetStatus(ApiCall): backlog_queue_item = search_queue.BacklogQueueItem(show_obj, segment) sickgear.search_queue_scheduler.action.add_item(backlog_queue_item) - self.log(u'Starting backlog for %s season %s because some episodes were set to WANTED' % - (show_obj.unique_name, season)) + self.log(f'Starting backlog for {show_obj.unique_name} season {season}' + f' because some episodes were set to WANTED') extra_msg = " Backlog started" @@ -3336,7 +3332,7 @@ class CMD_SickGearShowAddExisting(ApiCall): try: myShow = t[int(self.prodid), False] except BaseTVinfoError as e: - self.log(u"Unable to find show with id " + str(self.tvid), logger.WARNING) + self.log(f'Unable to find show with id {self.tvid}', logger.WARNING) return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer") indexerName = None @@ -3499,7 +3495,7 @@ class CMD_SickGearShowAddNew(ApiCall): try: myShow = t[int(self.prodid), False] except BaseTVinfoError as e: - self.log(u"Unable to find show with id " + str(self.tvid), logger.WARNING) + self.log(f'Unable to find show with id {self.tvid}', logger.WARNING) return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer") indexerName = None @@ -3520,11 +3516,11 @@ class CMD_SickGearShowAddNew(ApiCall): # don't create show dir if config says not to if sickgear.ADD_SHOWS_WO_DIR: - self.log(u"Skipping initial creation of " + showPath + " due to config.ini setting") + self.log(f'Skipping initial creation of {showPath} due to config.ini setting') else: dir_exists = helpers.make_dir(showPath) if not dir_exists: - self.log(u"Unable to create the folder " + showPath + ", can't add the show", logger.ERROR) + self.log(f"Unable to create the folder {showPath}, can't add the show", logger.ERROR) return _responds(RESULT_FAILURE, {"path": showPath}, "Unable to create the folder " + showPath + ", can't add the show") else: @@ -4440,7 +4436,7 @@ class CMD_SickGearShowUpdate(ApiCall): sickgear.show_queue_scheduler.action.update_show(show_obj, True) return _responds(RESULT_SUCCESS, msg='%s has queued to be updated' % show_obj.unique_name) except exceptions_helper.CantUpdateException as e: - self.log(u'Unable to update %s. %s' % (show_obj.unique_name, ex(e)), logger.ERROR) + self.log(f'Unable to update {show_obj.unique_name}. {ex(e)}', logger.ERROR) return _responds(RESULT_FAILURE, msg='Unable to update %s. 
%s' % (show_obj.unique_name, ex(e))) diff --git a/sickgear/webserve.py b/sickgear/webserve.py index 5b83341e..1a97fe8e 100644 --- a/sickgear/webserve.py +++ b/sickgear/webserve.py @@ -167,12 +167,12 @@ class BaseStaticFileHandler(StaticFileHandler): body = '\nRequest body: %s' % decode_str(self.request.body) except (BaseException, Exception): pass - logger.log('Sent %s error response to a `%s` request for `%s` with headers:\n%s%s' % - (status_code, self.request.method, self.request.path, self.request.headers, body), logger.WARNING) + logger.warning(f'Sent {status_code} error response to a `{self.request.method}`' + f' request for `{self.request.path}` with headers:\n' + f'{self.request.headers}{body}') # suppress traceback by removing 'exc_info' kwarg if 'exc_info' in kwargs: - logger.log('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"]), - logger.DEBUG) + logger.debug('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"])) del kwargs['exc_info'] return super(BaseStaticFileHandler, self).write_error(status_code, **kwargs) @@ -228,12 +228,11 @@ class RouteHandler(RequestHandler): body = '\nRequest body: %s' % decode_str(self.request.body) except (BaseException, Exception): pass - logger.log('Sent %s error response to a `%s` request for `%s` with headers:\n%s%s' % - (status_code, self.request.method, self.request.path, self.request.headers, body), logger.WARNING) + logger.warning(f'Sent {status_code} error response to a `{self.request.method}`' + f' request for `{self.request.path}` with headers:\n{self.request.headers}{body}') # suppress traceback by removing 'exc_info' kwarg if 'exc_info' in kwargs: - logger.log('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"]), - logger.DEBUG) + logger.debug('Gracefully handled exception text:\n%s' % traceback.format_exception(*kwargs["exc_info"])) del kwargs['exc_info'] return super(RouteHandler, self).write_error(status_code, **kwargs) @@ -432,7 +431,7 @@ class CalendarHandler(BaseHandler): Works with iCloud, Google Calendar and Outlook. 
Provides a subscribeable URL for iCal subscriptions """ - logger.log(u'Receiving iCal request from %s' % self.request.remote_ip) + logger.log(f'Receiving iCal request from {self.request.remote_ip}') # Limit dates past_date = (datetime.date.today() + datetime.timedelta(weeks=-52)).toordinal() @@ -472,21 +471,17 @@ class CalendarHandler(BaseHandler): minutes=helpers.try_int(show['runtime'], 60)) # Create event for episode - ical += 'BEGIN:VEVENT%s' % crlf \ - + 'DTSTART:%sT%sZ%s' % (air_date_time.strftime('%Y%m%d'), - air_date_time.strftime('%H%M%S'), crlf) \ - + 'DTEND:%sT%sZ%s' % (air_date_time_end.strftime('%Y%m%d'), - air_date_time_end.strftime('%H%M%S'), crlf) \ - + u'SUMMARY:%s - %sx%s - %s%s' % (show['show_name'], episode['season'], episode['episode'], - episode['name'], crlf) \ - + u'UID:%s-%s-%s-E%sS%s%s' % (appname, datetime.date.today().isoformat(), - show['show_name'].replace(' ', '-'), - episode['episode'], episode['season'], crlf) \ - + u'DESCRIPTION:%s on %s' % ((show['airs'] or '(Unknown airs)'), - (show['network'] or 'Unknown network')) \ - + ('' if not episode['description'] - else u'%s%s' % (nl, episode['description'].splitlines()[0])) \ - + '%sEND:VEVENT%s' % (crlf, crlf) + desc = '' if not episode['description'] else f'{nl}{episode["description"].splitlines()[0]}' + ical += (f'BEGIN:VEVENT{crlf}' + f'DTSTART:{air_date_time.strftime("%Y%m%d")}T{air_date_time.strftime("%H%M%S")}Z{crlf}' + f'DTEND:{air_date_time_end.strftime("%Y%m%d")}T{air_date_time_end.strftime("%H%M%S")}Z{crlf}' + f'SUMMARY:{show["show_name"]} - {episode["season"]}x{episode["episode"]}' + f' - {episode["name"]}{crlf}' + f'UID:{appname}-{datetime.date.today().isoformat()}-{show["show_name"].replace(" ", "-")}' + f'-E{episode["episode"]}S{episode["season"]}{crlf}' + f'DESCRIPTION:{(show["airs"] or "(Unknown airs)")} on {(show["network"] or "Unknown network")}' + f'{desc}{crlf}' + f'END:VEVENT{crlf}') # Ending the iCal return ical + 'END:VCALENDAR' @@ -499,7 +494,7 @@ class RepoHandler(BaseStaticFileHandler): kodi_is_legacy = None def parse_url_path(self, url_path): - logger.log('Kodi req... get(path): %s' % url_path, logger.DEBUG) + logger.debug('Kodi req... get(path): %s' % url_path) return super(RepoHandler, self).parse_url_path(url_path) def set_extra_headers(self, *args, **kwargs): @@ -514,7 +509,7 @@ class RepoHandler(BaseStaticFileHandler): super(RepoHandler, self).initialize(*args, **kwargs) - logger.log('Kodi req... initialize(path): %s' % kwargs['path'], logger.DEBUG) + logger.debug('Kodi req... initialize(path): %s' % kwargs['path']) cache_client = os.path.join(sickgear.CACHE_DIR, 'clients') cache_client_kodi = os.path.join(cache_client, 'kodi') cache_client_kodi_watchedstate = os.path.join(cache_client_kodi, 'service.sickgear.watchedstate.updater') @@ -583,7 +578,7 @@ class RepoHandler(BaseStaticFileHandler): # Force a UNIX line ending, like the md5sum utility. 
with io.open(os.path.join(zip_path, '%s.md5' % zip_name), 'w', newline='\n') as zh: - zh.write(u'%s *%s\n' % (self.md5ify(zip_data), zip_name)) + zh.write(f'{self.md5ify(zip_data)} *{zip_name}\n') aid, ver = self.repo_sickgear_details() save_zip(aid, ver, os.path.join(cache_client_kodi, 'repository.sickgear'), @@ -739,7 +734,7 @@ class RepoHandler(BaseStaticFileHandler): def md5ify(string): if not isinstance(string, binary_type): string = string.encode('utf-8') - return u'%s' % hashlib.new('md5', string).hexdigest() + return f'{hashlib.new("md5", string).hexdigest()}' def kodi_repository_sickgear_zip(self): bfr = io.BytesIO() @@ -753,7 +748,7 @@ class RepoHandler(BaseStaticFileHandler): infile = fh.read() zh.writestr('repository.sickgear/icon.png', infile, zipfile.ZIP_DEFLATED) except OSError as e: - logger.log('Unable to zip: %r / %s' % (e, ex(e)), logger.WARNING) + logger.warning('Unable to zip: %r / %s' % (e, ex(e))) zip_data = bfr.getvalue() bfr.close() @@ -792,7 +787,7 @@ class RepoHandler(BaseStaticFileHandler): zh.writestr(os.path.relpath(direntry.path.replace(self.kodi_legacy, ''), basepath), infile, zipfile.ZIP_DEFLATED) except OSError as e: - logger.log('Unable to zip %s: %r / %s' % (direntry.path, e, ex(e)), logger.WARNING) + logger.warning('Unable to zip %s: %r / %s' % (direntry.path, e, ex(e))) zip_data = bfr.getvalue() bfr.close() @@ -1466,7 +1461,7 @@ r.close() if not bname: msg = 'Missing media file name provided' data[k] = msg - logger.log('Update watched state skipped an item: %s' % msg, logger.WARNING) + logger.warning('Update watched state skipped an item: %s' % msg) continue if bname in ep_results: @@ -1494,7 +1489,7 @@ r.close() if as_json: if not data: data = dict(error='Request made to SickGear with invalid payload') - logger.log('Update watched state failed: %s' % data['error'], logger.WARNING) + logger.warning('Update watched state failed: %s' % data['error']) return json_dumps(data) @@ -1628,13 +1623,13 @@ class Home(MainHandler): images_path = os.path.join(sickgear.PROG_DIR, 'gui', 'slick', 'images', 'network') for cur_show_obj in sickgear.showList: network_name = 'nonetwork' if None is cur_show_obj.network \ - else cur_show_obj.network.replace(u'\u00C9', 'e').lower() + else cur_show_obj.network.replace('\u00C9', 'e').lower() if network_name not in networks: - filename = u'%s.png' % network_name + filename = f'{network_name}.png' if not os.path.isfile(os.path.join(images_path, filename)): - filename = u'%s.png' % re.sub(r'(?m)(.*)\s+\(\w{2}\)$', r'\1', network_name) + filename = '%s.png' % re.sub(r'(?m)(.*)\s+\(\w{2}\)$', r'\1', network_name) if not os.path.isfile(os.path.join(images_path, filename)): - filename = u'nonetwork.png' + filename = 'nonetwork.png' networks.setdefault(network_name, filename) t.network_images.setdefault(cur_show_obj.tvid_prodid, networks[network_name]) @@ -1690,10 +1685,10 @@ class Home(MainHandler): authed, auth_msg = sab.test_authentication(host, username, password, apikey) if authed: - return u'Success. Connected %s authentication' % \ - ('using %s' % access_msg, 'with no')['None' == auth_msg.lower()] - return u'Authentication failed. %s' % auth_msg - return u'Unable to connect to host' + return f'Success. Connected' \ + f' {(f"using {access_msg}", "with no")["None" == auth_msg.lower()]} authentication' + return f'Authentication failed. 
{auth_msg}' + return 'Unable to connect to host' def test_nzbget(self, host=None, use_https=None, username=None, password=None): self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') @@ -2022,10 +2017,10 @@ class Home(MainHandler): def check_update(self): # force a check to see if there is a new version if sickgear.update_software_scheduler.action.check_for_new_version(force=True): - logger.log(u'Forced version check found results') + logger.log('Forced version check found results') if sickgear.update_packages_scheduler.action.check_for_new_version(force=True): - logger.log(u'Forced package version check found results') + logger.log('Forced package version check found results') self.redirect('/home/') @@ -2591,7 +2586,7 @@ class Home(MainHandler): new_prodid=m_prodid, force_id=True, set_pause=set_pause, mark_wanted=mark_wanted) except (BaseException, Exception) as e: - logger.log('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e)), logger.WARNING) + logger.warning('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e))) ui.notifications.message('TV info source switch', 'Queued switch of tv info source') return {'Success': 'Switched to new TV info source'} @@ -2658,12 +2653,12 @@ class Home(MainHandler): else: msg = 'Main ID unchanged, because show from %s with ID: %s exists in DB.' % \ (sickgear.TVInfoAPI(m_tvid).name, mtvid_prodid) - logger.log(msg, logger.WARNING) + logger.warning(msg) ui.notifications.message(*[s.strip() for s in msg.split(',')]) except MultipleShowObjectsException: msg = 'Main ID unchanged, because show from %s with ID: %s exists in DB.' % \ (sickgear.TVInfoAPI(m_tvid).name, m_prodid) - logger.log(msg, logger.WARNING) + logger.warning(msg) ui.notifications.message(*[s.strip() for s in msg.split(',')]) response.update({ @@ -2943,9 +2938,9 @@ class Home(MainHandler): old_path = os.path.normpath(show_obj._location) new_path = os.path.normpath(location) if old_path != new_path: - logger.log(u'%s != %s' % (old_path, new_path), logger.DEBUG) + logger.debug(f'{old_path} != {new_path}') if not os.path.isdir(new_path) and not sickgear.CREATE_MISSING_SHOW_DIRS: - errors.append(u'New location %s does not exist' % new_path) + errors.append(f'New location {new_path} does not exist') # don't bother if we're going to update anyway elif not do_update: @@ -2960,9 +2955,8 @@ class Home(MainHandler): # show_obj.load_episodes_from_tvinfo() # rescan the episodes in the new folder except exceptions_helper.NoNFOException: - errors.append( - u"The folder at %s doesn't contain a tvshow.nfo - " - u"copy your files to that folder before you change the directory in SickGear." 
% new_path) + errors.append(f'The folder at {new_path} doesn\'t contain a tvshow.nfo -' + f' copy your files to that folder before you change the directory in SickGear.') # save it to the DB show_obj.save_to_db() @@ -3175,7 +3169,7 @@ class Home(MainHandler): sql_l = [] for cur_ep in eps.split('|'): - logger.log(u'Attempting to set status on episode %s to %s' % (cur_ep, status), logger.DEBUG) + logger.debug(f'Attempting to set status on episode {cur_ep} to {status}') ep_obj = show_obj.get_episode(*tuple([int(x) for x in cur_ep.split('x')])) @@ -3205,7 +3199,7 @@ class Home(MainHandler): err_msg = 'to downloaded because it\'s not snatched/downloaded/archived' if err_msg: - logger.log('Refusing to change status of %s %s' % (cur_ep, err_msg), logger.ERROR) + logger.error('Refusing to change status of %s %s' % (cur_ep, err_msg)) continue if ARCHIVED == status: @@ -3239,31 +3233,31 @@ class Home(MainHandler): if season not in season_wanted: season_wanted += [season] - season_list += u'
<li>Season %s</li>' % season - logger.log((u'Not adding wanted eps to backlog search for %s season %s because show is paused', - u'Starting backlog search for %s season %s because eps were set to wanted')[ + season_list += f'<li>Season {season}</li>' + logger.log(('Not adding wanted eps to backlog search for %s season %s because show is paused', + 'Starting backlog search for %s season %s because eps were set to wanted')[ not show_obj.paused] % (show_obj.unique_name, season)) - (title, msg) = (('Not starting backlog', u'Paused show prevented backlog search'), - ('Backlog started', u'Backlog search started'))[not show_obj.paused] + (title, msg) = (('Not starting backlog', 'Paused show prevented backlog search'), + ('Backlog started', 'Backlog search started'))[not show_obj.paused] if segments: ui.notifications.message(title, - u'%s for the following seasons of %s:<br><ul>%s</ul>' - % (msg, show_obj.unique_name, season_list)) + f'{msg} for the following seasons of {show_obj.unique_name}:<br>' + f'<ul>{season_list}</ul>') else: ui.notifications.message('Not starting backlog', 'No provider has active searching enabled') elif FAILED == status: - msg = u'Retrying search automatically for the following season of %s:<br><ul>' % show_obj.unique_name + msg = f'Retrying search automatically for the following season of {show_obj.unique_name}:<br><ul>' for season, segment in iteritems(segments): # type: int, List[sickgear.tv.TVEpisode] cur_failed_queue_item = search_queue.FailedQueueItem(show_obj, segment) sickgear.search_queue_scheduler.action.add_item(cur_failed_queue_item) msg += '<li>Season %s</li>' % season - logger.log(u'Retrying search for %s season %s because some eps were set to failed' % - (show_obj.unique_name, season)) + logger.log(f'Retrying search for {show_obj.unique_name} season {season}' + f' because some eps were set to failed') msg += '</ul>' @@ -3359,7 +3353,7 @@ class Home(MainHandler): tvid_prodid_obj.list + [ep_info[0], ep_info[1]]) if not sql_result: - logger.log(u'Unable to find an episode for ' + cur_ep + ', skipping', logger.WARNING) + logger.warning(f'Unable to find an episode for {cur_ep}, skipping') continue related_ep_result = my_db.select('SELECT * FROM tv_episodes WHERE location = ? AND episode != ?', [sql_result[0]['location'], ep_info[1]]) @@ -3895,8 +3889,8 @@ class HomeProcessMedia(Home): skip_failure_processing = nzbget_call and not nzbget_dupekey if nzbget_call and sickgear.NZBGET_SCRIPT_VERSION != kwargs.get('pp_version', '0'): - logger.log('Calling SickGear-NG.py script %s is not current version %s, please update.' % - (kwargs.get('pp_version', '0'), sickgear.NZBGET_SCRIPT_VERSION), logger.ERROR) + logger.error(f'Calling SickGear-NG.py script {kwargs.get("pp_version", "0")} is not current version' + f' {sickgear.NZBGET_SCRIPT_VERSION}, please update.') if sickgear.NZBGET_SKIP_PM and nzbget_call and nzbget_dupekey and nzb_name and show_obj: processTV.process_minimal(nzb_name, show_obj, @@ -3933,9 +3927,9 @@ class HomeProcessMedia(Home): regexp = re.compile(r'(?i)<br[\s/]*>', flags=re.UNICODE) result = regexp.sub('\n', result) if None is not quiet and 1 == int(quiet): - regexp = re.compile(u'(?i)<a[^>]+>([^<]+)</a>', flags=re.UNICODE) - return u'%s' % regexp.sub(r'\1', result) + regexp = re.compile('(?i)<a[^>]+>([^<]+)</a>', flags=re.UNICODE) + return regexp.sub(r'\1', result) - return self._generic_message('Postprocessing results', u'<pre>%s</pre>' % result) + return self._generic_message('Postprocessing results', f'<pre>{result}</pre>
      ') # noinspection PyPep8Naming def processEpisode(self, dir_name=None, nzb_name=None, process_type=None, **kwargs): @@ -5027,13 +5021,13 @@ class AddShows(Home): normalised = resp else: for item in resp: - normalised.append({u'show': item}) + normalised.append({'show': item}) del resp except TraktAuthException as e: - logger.log(u'Pin authorisation needed to connect to Trakt service: %s' % ex(e), logger.WARNING) + logger.warning(f'Pin authorisation needed to connect to Trakt service: {ex(e)}') error_msg = 'Unauthorized: Get another pin in the Notifications Trakt settings' except TraktException as e: - logger.log(u'Could not connect to Trakt service: %s' % ex(e), logger.WARNING) + logger.warning(f'Could not connect to Trakt service: {ex(e)}') except exceptions_helper.ConnectionSkipException as e: logger.log('Skipping Trakt because of previous failure: %s' % ex(e)) except (IndexError, KeyError): @@ -6031,8 +6025,7 @@ class AddShows(Home): series_pieces = which_series.split('|') if (which_series and root_dir) or (which_series and full_show_path and 1 < len(series_pieces)): if 4 > len(series_pieces): - logger.log('Unable to add show due to show selection. Not enough arguments: %s' % (repr(series_pieces)), - logger.ERROR) + logger.error(f'Unable to add show due to show selection. Not enough arguments: {repr(series_pieces)}') ui.notifications.error('Unknown error. Unable to add show due to problem with show selection.') return self.redirect('/add-shows/import/') @@ -6058,7 +6051,7 @@ class AddShows(Home): # if the dir exists, do 'add existing show' if os.path.isdir(show_dir) and not full_show_path: - ui.notifications.error('Unable to add show', u'Found existing folder: ' + show_dir) + ui.notifications.error('Unable to add show', f'Found existing folder: {show_dir}') return self.redirect( '/add-shows/import?tvid_prodid=%s%s%s&hash_dir=%s%s' % (tvid, TVidProdid.glue, prodid, re.sub('[^a-z]', '', sg_helpers.md5_for_text(show_dir)), @@ -6066,11 +6059,11 @@ class AddShows(Home): # don't create show dir if config says not to if sickgear.ADD_SHOWS_WO_DIR: - logger.log(u'Skipping initial creation due to config.ini setting (add_shows_wo_dir)') + logger.log('Skipping initial creation due to config.ini setting (add_shows_wo_dir)') else: if not helpers.make_dir(show_dir): - logger.log(u'Unable to add show because can\'t create folder: ' + show_dir, logger.ERROR) - ui.notifications.error('Unable to add show', u'Can\'t create folder: ' + show_dir) + logger.error(f"Unable to add show because can't create folder: {show_dir}") + ui.notifications.error('Unable to add show', f"Can't create folder: {show_dir}") return self.redirect('/home/') helpers.chmod_as_parent(show_dir) @@ -6880,8 +6873,7 @@ class Manage(MainHandler): base_dir = dir_map[cur_root_dir] new_show_dir = os.path.join(base_dir, cur_show_dir) # noinspection PyProtectedMember - logger.log(u'For show %s changing dir from %s to %s' % - (show_obj.unique_name, show_obj._location, new_show_dir)) + logger.log(f'For show {show_obj.unique_name} changing dir from {show_obj._location} to {new_show_dir}') else: # noinspection PyProtectedMember new_show_dir = show_obj._location @@ -6960,7 +6952,7 @@ class Manage(MainHandler): prune=new_prune, tag=new_tag, direct_call=True) if cur_errors: - logger.log(u'Errors: ' + str(cur_errors), logger.ERROR) + logger.error(f'Errors: {cur_errors}') errors.append('%s:\n
<ul>' % show_obj.unique_name + ' '.join( ['<li>%s</li>' % error for error in cur_errors]) + '</ul>
      ') @@ -7086,7 +7078,7 @@ class Manage(MainHandler): new_show_id = new_show.split(':') new_tvid = int(new_show_id[0]) if new_tvid not in tv_sources: - logger.log('Skipping %s because target is not a valid source' % show, logger.WARNING) + logger.warning('Skipping %s because target is not a valid source' % show) errors.append('Skipping %s because target is not a valid source' % show) continue try: @@ -7094,7 +7086,7 @@ class Manage(MainHandler): except (BaseException, Exception): show_obj = None if not show_obj: - logger.log('Skipping %s because source is not a valid show' % show, logger.WARNING) + logger.warning('Skipping %s because source is not a valid show' % show) errors.append('Skipping %s because source is not a valid show' % show) continue if 2 == len(new_show_id): @@ -7104,21 +7096,20 @@ class Manage(MainHandler): except (BaseException, Exception): new_show_obj = None if new_show_obj: - logger.log('Skipping %s because target show with that id already exists in db' % show, - logger.WARNING) + logger.warning('Skipping %s because target show with that id already exists in db' % show) errors.append('Skipping %s because target show with that id already exists in db' % show) continue else: new_prodid = None if show_obj.tvid == new_tvid and (not new_prodid or new_prodid == show_obj.prodid): - logger.log('Skipping %s because target same as source' % show, logger.WARNING) + logger.warning('Skipping %s because target same as source' % show) errors.append('Skipping %s because target same as source' % show) continue try: sickgear.show_queue_scheduler.action.switch_show(show_obj=show_obj, new_tvid=new_tvid, new_prodid=new_prodid, force_id=force_id) except (BaseException, Exception) as e: - logger.log('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e)), logger.WARNING) + logger.warning('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e))) errors.append('Could not add show %s to switch queue: %s' % (show_obj.tvid_prodid, ex(e))) return json_dumps(({'result': 'success'}, {'errors': ', '.join(errors)})[0 < len(errors)]) @@ -7174,7 +7165,7 @@ class ManageSearch(Manage): # force it to run the next time it looks if not sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress(): sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG) - logger.log(u'Backlog search forced') + logger.log('Backlog search forced') ui.notifications.message('Backlog search started') time.sleep(5) @@ -7186,7 +7177,7 @@ class ManageSearch(Manage): if not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress(): result = sickgear.recent_search_scheduler.force_run() if result: - logger.log(u'Recent search forced') + logger.log('Recent search forced') ui.notifications.message('Recent search started') time.sleep(5) @@ -7197,7 +7188,7 @@ class ManageSearch(Manage): # force it to run the next time it looks result = sickgear.proper_finder_scheduler.force_run() if result: - logger.log(u'Find propers search forced') + logger.log('Find propers search forced') ui.notifications.message('Find propers search started') time.sleep(5) @@ -7307,7 +7298,7 @@ class ShowTasks(Manage): result = sickgear.show_update_scheduler.force_run() if result: - logger.log(u'Show Update forced') + logger.log('Show Update forced') ui.notifications.message('Forced Show Update started') time.sleep(5) @@ -7658,7 +7649,7 @@ class History(MainHandler): hosts, keys, message = client.check_config(sickgear.EMBY_HOST, sickgear.EMBY_APIKEY) if sickgear.USE_EMBY and hosts: - 
logger.log('Updating Emby watched episode states', logger.DEBUG) + logger.debug('Updating Emby watched episode states') rd = sickgear.ROOT_DIRS.split('|')[1:] \ + [x.split('=')[0] for x in sickgear.EMBY_PARENT_MAPS.split(',') if any(x)] @@ -7744,8 +7735,8 @@ class History(MainHandler): except (BaseException, Exception): continue if mapping: - logger.log('Folder mappings used, the first of %s is [%s] in Emby is [%s] in SickGear' % - (mapped, mapping[0], mapping[1]), logger.DEBUG) + logger.debug(f'Folder mappings used, the first of {mapped} is [{mapping[0]}] in Emby is' + f' [{mapping[1]}] in SickGear') if states: # Prune user removed items that are no longer being returned by API @@ -7767,7 +7758,7 @@ class History(MainHandler): hosts = [x.strip().lower() for x in sickgear.PLEX_SERVER_HOST.split(',')] if sickgear.USE_PLEX and hosts: - logger.log('Updating Plex watched episode states', logger.DEBUG) + logger.debug('Updating Plex watched episode states') from lib.plex import Plex @@ -7785,7 +7776,7 @@ class History(MainHandler): # noinspection HttpUrlsUsage parts = re.search(r'(.*):(\d+)$', urlparse('http://' + re.sub(r'^\w+://', '', cur_host)).netloc) if not parts: - logger.log('Skipping host not in min. host:port format : %s' % cur_host, logger.WARNING) + logger.warning('Skipping host not in min. host:port format : %s' % cur_host) elif parts.group(1): plex.plex_host = parts.group(1) if None is not parts.group(2): @@ -7810,11 +7801,10 @@ class History(MainHandler): idx += 1 - logger.log('Fetched %s of %s played for host : %s' % (len(plex.show_states), played, cur_host), - logger.DEBUG) + logger.debug('Fetched %s of %s played for host : %s' % (len(plex.show_states), played, cur_host)) if mapping: - logger.log('Folder mappings used, the first of %s is [%s] in Plex is [%s] in SickGear' % - (mapped, mapping[0], mapping[1]), logger.DEBUG) + logger.debug(f'Folder mappings used, the first of {mapped} is [{mapping[0]}] in Plex is' + f' [{mapping[1]}] in SickGear') if states: # Prune user removed items that are no longer being returned by API @@ -7866,7 +7856,7 @@ class History(MainHandler): result = helpers.remove_file(cur_result['location']) if result: - logger.log(u'%s file %s' % (result, cur_result['location'])) + logger.log(f'{result} file {cur_result["location"]}') deleted.update({cur_result['tvep_id']: row_show_ids[cur_result['rowid']]}) if row_show_ids[cur_result['rowid']] not in refresh: @@ -8075,7 +8065,7 @@ class ConfigGeneral(Config): # Return a hex digest of the md5, e.g. 
49f68a5c8493ec2c0bf489821c21fc3b app_name = kwargs.get('app_name') app_name = '' if not app_name else ' for [%s]' % app_name - logger.log(u'New API generated%s' % app_name) + logger.log(f'New API generated{app_name}') return result @@ -8134,7 +8124,7 @@ class ConfigGeneral(Config): result['result'] = 'Failed: apikey already exists, try again' else: sickgear.API_KEYS.append([app_name, api_key]) - logger.log('Created apikey for [%s]' % app_name, logger.DEBUG) + logger.debug('Created apikey for [%s]' % app_name) result.update(dict(result='Success: apikey added', added=api_key)) sickgear.USE_API = 1 sickgear.save_config() @@ -8153,7 +8143,7 @@ class ConfigGeneral(Config): result['result'] = 'Failed: key doesn\'t exist' else: sickgear.API_KEYS = [ak for ak in sickgear.API_KEYS if ak[0] and api_key != ak[1]] - logger.log('Revoked [%s] apikey [%s]' % (app_name, api_key), logger.DEBUG) + logger.debug('Revoked [%s] apikey [%s]' % (app_name, api_key)) result.update(dict(result='Success: apikey removed', removed=True)) sickgear.save_config() ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE)) @@ -8196,7 +8186,7 @@ class ConfigGeneral(Config): with sickgear.show_update_scheduler.lock: sickgear.show_update_scheduler.start_time = datetime.time(hour=sickgear.SHOW_UPDATE_HOUR) except (BaseException, Exception) as e: - logger.log('Could not change Show Update Scheduler time: %s' % ex(e), logger.ERROR) + logger.error('Could not change Show Update Scheduler time: %s' % ex(e)) sickgear.TRASH_REMOVE_SHOW = config.checkbox_to_value(trash_remove_show) sg_helpers.TRASH_REMOVE_SHOW = sickgear.TRASH_REMOVE_SHOW sickgear.TRASH_ROTATE_LOGS = config.checkbox_to_value(trash_rotate_logs) @@ -8231,14 +8221,14 @@ class ConfigGeneral(Config): # not deleted. Deduped list order preservation is key to feature function. my_db = db.DBConnection() sql_result = my_db.select('SELECT DISTINCT tag FROM tv_shows') - new_names = [u'' + v.strip() for v in (show_tags.split(u','), [])[None is show_tags] if v.strip()] + new_names = [v.strip() for v in (show_tags.split(','), [])[None is show_tags] if v.strip()] orphans = [item for item in [v['tag'] for v in sql_result or []] if item not in new_names] cleanser = [] if 0 < len(orphans): cleanser = [item for item in sickgear.SHOW_TAGS if item in orphans or item in new_names] - results += [u'An attempt was prevented to remove a show list group name still in use'] + results += ['An attempt was prevented to remove a show list group name still in use'] dedupe = {} - sickgear.SHOW_TAGS = [dedupe.setdefault(item, item) for item in (cleanser + new_names + [u'Show List']) + sickgear.SHOW_TAGS = [dedupe.setdefault(item, item) for item in (cleanser + new_names + ['Show List']) if item not in dedupe] sickgear.HOME_SEARCH_FOCUS = config.checkbox_to_value(home_search_focus) @@ -8251,7 +8241,7 @@ class ConfigGeneral(Config): sickgear.DATE_PRESET = date_preset if time_preset: sickgear.TIME_PRESET_W_SECONDS = time_preset - sickgear.TIME_PRESET = sickgear.TIME_PRESET_W_SECONDS.replace(u':%S', u'') + sickgear.TIME_PRESET = sickgear.TIME_PRESET_W_SECONDS.replace(':%S', '') sickgear.TIMEZONE_DISPLAY = timezone_display # Web interface @@ -8307,7 +8297,7 @@ class ConfigGeneral(Config): if 0 < len(results): for v in results: - logger.log(v, logger.ERROR) + logger.error(v) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: @@ -8331,7 +8321,7 @@ class ConfigGeneral(Config): pulls = sickgear.update_software_scheduler.action.list_remote_pulls() return json_dumps({'result': 'success', 'pulls': pulls}) except (BaseException, Exception) as e: - logger.log(u'exception msg: ' + ex(e), logger.DEBUG) + logger.debug(f'exception msg: {ex(e)}') return json_dumps({'result': 'fail'}) @staticmethod @@ -8340,7 +8330,7 @@ class ConfigGeneral(Config): branches = sickgear.update_software_scheduler.action.list_remote_branches() return json_dumps({'result': 'success', 'branches': branches, 'current': sickgear.BRANCH or 'main'}) except (BaseException, Exception) as e: - logger.log(u'exception msg: ' + ex(e), logger.DEBUG) + logger.debug(f'exception msg: {ex(e)}') return json_dumps({'result': 'fail'}) @@ -8465,7 +8455,7 @@ class ConfigSearch(Config): sickgear.TORRENT_LABEL = torrent_label sickgear.TORRENT_LABEL_VAR = config.to_int((0, torrent_label_var)['rtorrent' == torrent_method], 1) if not (0 <= sickgear.TORRENT_LABEL_VAR <= 5): - logger.log('Setting rTorrent custom%s is not 0-5, defaulting to custom1' % torrent_label_var, logger.DEBUG) + logger.debug('Setting rTorrent custom%s is not 0-5, defaulting to custom1' % torrent_label_var) sickgear.TORRENT_LABEL_VAR = 1 sickgear.TORRENT_VERIFY_CERT = config.checkbox_to_value(torrent_verify_cert) sickgear.TORRENT_PATH = torrent_path @@ -8478,7 +8468,7 @@ class ConfigSearch(Config): if 0 < len(results): for x in results: - logger.log(x, logger.ERROR) + logger.error(x) ui.notifications.error('Error(s) Saving Configuration', '
      \n'.join(results)) else: @@ -8605,7 +8595,7 @@ class ConfigMediaProcess(Config): if 0 < len(results): for x in results: - logger.log(x, logger.ERROR) + logger.error(x) ui.notifications.error('Error(s) Saving Configuration', '
                                    '\n'.join(results))
         else:
@@ -8679,7 +8669,7 @@ class ConfigMediaProcess(Config):
 
         except (BaseException, Exception) as e:
             msg = ex(e)
-            logger.log(u'Rar Not Supported: %s' % msg, logger.ERROR)
+            logger.error(f'Rar Not Supported: {msg}')
         return 'not supported'
 
@@ -9019,7 +9009,7 @@ class ConfigProviders(Config):
         if 0 < len(results):
             for x in results:
-                logger.log(x, logger.ERROR)
+                logger.error(x)
             ui.notifications.error('Error(s) Saving Configuration',
                                    '\n'.join(results))
         else:
             ui.notifications.message('Configuration Saved', os.path.join(sickgear.CONFIG_FILE))
@@ -9286,7 +9276,7 @@ class ConfigNotifications(Config):
 
         if 0 < len(results):
             for x in results:
-                logger.log(x, logger.ERROR)
+                logger.error(x)
             ui.notifications.error('Error(s) Saving Configuration',
                                    '\n'.join(results))
         else:
@@ -9341,7 +9331,7 @@ class ConfigSubtitles(Config):
 
         if 0 < len(results):
             for x in results:
-                logger.log(x, logger.ERROR)
+                logger.error(x)
             ui.notifications.error('Error(s) Saving Configuration',
                                    '\n'.join(results))
         else:
@@ -9374,7 +9364,7 @@ class ConfigAnime(Config):
 
         if 0 < len(results):
             for x in results:
-                logger.log(x, logger.ERROR)
+                logger.error(x)
             ui.notifications.error('Error(s) Saving Configuration',
                                    '\n'.join(results))
         else:
diff --git a/sickgear/webserveInit.py b/sickgear/webserveInit.py
index 025afaa6..8a46f1d4 100644
--- a/sickgear/webserveInit.py
+++ b/sickgear/webserveInit.py
@@ -74,7 +74,7 @@ class WebServer(threading.Thread):
         # If either the HTTPS certificate or key do not exist, make some self-signed ones.
         if make_cert:
             if not create_https_certificates(self.https_cert, self.https_key):
-                logger.log(u'Unable to create CERT/KEY files, disabling HTTPS')
+                logger.log('Unable to create CERT/KEY files, disabling HTTPS')
                 update_cfg |= False is not sickgear.ENABLE_HTTPS
                 sickgear.ENABLE_HTTPS = False
                 self.enable_https = False
@@ -82,7 +82,7 @@
                 update_cfg = True
 
         if not (os.path.isfile(self.https_cert) and os.path.isfile(self.https_key)):
-            logger.log(u'Disabled HTTPS because of missing CERT and KEY files', logger.WARNING)
+            logger.warning('Disabled HTTPS because of missing CERT and KEY files')
             update_cfg |= False is not sickgear.ENABLE_HTTPS
             sickgear.ENABLE_HTTPS = False
             self.enable_https = False
@@ -231,7 +231,7 @@
         protocol, ssl_options = (('http', None), ('https', {'certfile': self.https_cert,
                                                             'keyfile': self.https_key}))[self.enable_https]
 
-        logger.log(u'Starting SickGear on %s://%s:%s/' % (protocol, self.options['host'], self.options['port']))
+        logger.log(f'Starting SickGear on {protocol}://{self.options["host"]}:{self.options["port"]}/')
 
         # python 3 needs to start event loop first
         import asyncio
@@ -244,8 +244,7 @@
                                      xheaders=sickgear.HANDLE_REVERSE_PROXY, protocol=protocol)
         except (BaseException, Exception):
             etype, evalue, etb = exc_info()
-            logger.log('Could not start webserver on %s. Exception: %s, Error: %s' % (
-                self.options['port'], etype, evalue), logger.ERROR)
+            logger.error(f'Could not start webserver on {self.options["port"]}. Exception: {etype}, Error: {evalue}')
             return
 
         self.io_loop = IOLoop.current()
@@ -271,7 +270,7 @@
                     getattr(s, nh)()
                     sickgear.classes.loading_msg.reset()
             self.io_loop.add_callback(d_f, self, new_handler)
-            logger.log('Switching HTTP Server handlers to %s' % new_handler, logger.DEBUG)
+            logger.debug('Switching HTTP Server handlers to %s' % new_handler)
 
     def shut_down(self):
         self.alive = False
diff --git a/tests/migration_tests.py b/tests/migration_tests.py
index 2a2d3a6c..a03cd27c 100644
--- a/tests/migration_tests.py
+++ b/tests/migration_tests.py
@@ -109,22 +109,18 @@ class OldInitialSchema(db.SchemaUpgrade):
 
         if cur_db_version < MIN_DB_VERSION:
             logger.log_error_and_exit(
-                u'Your database version ('
-                + str(cur_db_version)
-                + ') is too old to migrate from what this version of SickGear supports ('
-                + str(MIN_DB_VERSION) + ').' + '\n'
-                + 'Upgrade using a previous version (tag) build 496 to build 501 of SickGear first or'
-                  ' remove database file to begin fresh.'
+                f'Your database version ({str(cur_db_version)}) is too old to migrate from what'
+                f' this version of SickGear supports ({str(MIN_DB_VERSION)}).\n'
+                f'Upgrade using a previous version (tag) build 496 to build 501 of SickGear first'
+                f' or remove database file to begin fresh.'
             )
 
         if cur_db_version > MAX_DB_VERSION:
             logger.log_error_and_exit(
-                u'Your database version ('
-                + str(cur_db_version)
-                + ') has been incremented past what this version of SickGear supports ('
-                + str(MAX_DB_VERSION) + ').' + '\n'
-                + 'If you have used other forks of SickGear,'
-                  ' your database may be unusable due to their modifications.'
+                f'Your database version ({str(cur_db_version)}) has been incremented past what'
+                f' this version of SickGear supports ({str(MAX_DB_VERSION)}).\n'
+                f'If you have used other forks of SickGear,'
+                f' your database may be unusable due to their modifications.'
             )
 
         return self.call_check_db_version()
diff --git a/tests/name_parser_tests.py b/tests/name_parser_tests.py
index 6e052375..05cefe99 100644
--- a/tests/name_parser_tests.py
+++ b/tests/name_parser_tests.py
@@ -367,16 +367,16 @@ combination_test_cases = [
 ]
 
 unicode_test_cases = [
-    (u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
+    ('The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
      parser.ParseResult(
-         u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
-         u'The Big Bang Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', u'SHELDON',
+         'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
+         'The Big Bang Theory', 2, [7], 'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', 'SHELDON',
          version=-1)
      ),
     ('The.Big.Bang.Theory.2x07.The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
      parser.ParseResult(
-         u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
-         u'The Big Bang Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', u'SHELDON',
+         'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
+         'The Big Bang Theory', 2, [7], 'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', 'SHELDON',
          version=-1)
      ),
 ]
diff --git a/tests/newznab_tests.py b/tests/newznab_tests.py
index 25bacb50..a5afea27 100644
--- a/tests/newznab_tests.py
+++ b/tests/newznab_tests.py
@@ -39,12 +39,12 @@ item_parse_test_cases = [
      ('Show.Name.S02E04.720p.HDTV.x264-GROUP', 'https://test.h')),
     (('Show.Name.S02E05.720p.HDTV.x264-GROUP-JUNK[JUNK]', 'https://test.h'),
      ('Show.Name.S02E05.720p.HDTV.x264-GROUP', 'https://test.h')),
-    ((u'Show.Name.S02E06.720p.HDTV.x264-GROUP-JUNK[JUNK帝]', 'https://test.h'),
-     (u'Show.Name.S02E06.720p.HDTV.x264-GROUP', 'https://test.h')),
-    ((u'Show.Name.S02E07-EpName帝.720p.HDTV.x264-GROUP帝-JUNK[JUNK帝]', 'https://test.h'),
-     (u'Show.Name.S02E07-EpName帝.720p.HDTV.x264-GROUP帝', 'https://test.h')),
-    ((u'[grp 帝] Show Name - 11 [1024x576 h264 AAC ger-sub][123456].mp4', 'https://test.h'),
-     (u'[grp.帝].Show.Name.-.11.[1024x576.h264.AAC.ger-sub][123456]', 'https://test.h')),
+    (('Show.Name.S02E06.720p.HDTV.x264-GROUP-JUNK[JUNK帝]', 'https://test.h'),
+     ('Show.Name.S02E06.720p.HDTV.x264-GROUP', 'https://test.h')),
+    (('Show.Name.S02E07-EpName帝.720p.HDTV.x264-GROUP帝-JUNK[JUNK帝]', 'https://test.h'),
+     ('Show.Name.S02E07-EpName帝.720p.HDTV.x264-GROUP帝', 'https://test.h')),
+    (('[grp 帝] Show Name - 11 [1024x576 h264 AAC ger-sub][123456].mp4', 'https://test.h'),
+     ('[grp.帝].Show.Name.-.11.[1024x576.h264.AAC.ger-sub][123456]', 'https://test.h')),
 ]
 
 size_test_cases = [
diff --git a/tests/scene_helpers_tests.py b/tests/scene_helpers_tests.py
index e49e27aa..29f80deb 100644
--- a/tests/scene_helpers_tests.py
+++ b/tests/scene_helpers_tests.py
@@ -100,8 +100,8 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
             sickgear.showDict[s.sid_int] = s
         scene_exceptions.retrieve_exceptions()
         name_cache.build_name_cache()
-        self.assertEqual(scene_exceptions.get_scene_exception_by_name(u'ブラック・ラグーン'), [1, 79604, -1])
-        self.assertEqual(scene_exceptions.get_scene_exception_by_name(u'Burakku Ragūn'), [1, 79604, -1])
+        self.assertEqual(scene_exceptions.get_scene_exception_by_name('ブラック・ラグーン'), [1, 79604, -1])
+        self.assertEqual(scene_exceptions.get_scene_exception_by_name('Burakku Ragūn'), [1, 79604, -1])
         self.assertEqual(scene_exceptions.get_scene_exception_by_name('Rokka no Yuusha'), [1, 295243, -1])
 
     def test_sceneExceptionByNameEmpty(self):
diff --git a/tests/test_lib.py b/tests/test_lib.py
index e3fe8be3..58b07ac8 100644
--- a/tests/test_lib.py
+++ b/tests/test_lib.py
@@ -42,10 +42,10 @@ TESTDBNAME = 'sickbeard.db'
 TESTCACHEDBNAME = 'cache.db'
 TESTFAILEDDBNAME = 'failed.db'
 
-SHOWNAME = u'show name'
+SHOWNAME = 'show name'
 SEASON = 4
 EPISODE = 2
-FILENAME = u'show name - s0' + str(SEASON) + 'e0' + str(EPISODE) + '.mkv'
+FILENAME = f'show name - s0{SEASON}e0{EPISODE}.mkv'
 FILEDIR = os.path.join(TESTDIR, SHOWNAME)
 FILEPATH = os.path.join(FILEDIR, FILENAME)
 
diff --git a/tests/xem_tests.py b/tests/xem_tests.py
index 81fa0cfb..1d3cb6d6 100644
--- a/tests/xem_tests.py
+++ b/tests/xem_tests.py
@@ -61,12 +61,12 @@ class XEMBasicTests(test.SickbeardTestDBCase):
             curRegex = '^' + escaped_name + r'\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)' \
                        r'[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+|(?:\d{1,3}.+\d{1,}[a-zA-Z]{2}' \
                        r'\W+[a-zA-Z]{3,}\W+\d{4}.+))'
-            # print(u"Checking if show " + name + " matches " + curRegex)
+            # print("Checking if show " + name + " matches " + curRegex)
 
             # noinspection PyUnusedLocal
             match = re.search(curRegex, name, re.I)
 
             # if match:
-            #     print(u"Matched " + curRegex + " to " + name)
+            #     print("Matched " + curRegex + " to " + name)
 
 
 if '__main__' == __name__: