Change py2 deprecation cleanups.

Remove the py2 code path from _23.py.
Remove more six.moves mappings.
Replace filter_iter with native filter.
Replace map_iter with native map.
Remove unidecode from _23 (on py3 it was an empty pass-through wrapper).
Replace map_list with native list(map(...)) for performance reasons.
Replace filter_list with list(filter(...)).
Replace list_keys with list(...).
Replace list_values with list(...values()).
Replace list_items with list(...items()).
Replace ordered_dict with dict (see the migration sketch below).
Fix tvinfo base type docs.
Remove py2 parts from sg_futures.
Remove the bundled scandir lib; it is part of the os module in py3.
Remove remaining PY2-only code.
Ignore unknown ids for characters/persons.
Fix tvdb image parsing.
Ignore unknown id sources on the person page.
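
For reference, the replacements listed above are mechanical; a minimal sketch of the removed helpers against their native py3 equivalents (illustrative only, the sample dict is hypothetical):

    d = {'b': 2, 'a': 1}
    keys = list(d)                                      # was: list_keys(d)
    values = list(d.values())                           # was: list_values(d)
    items = list(d.items())                             # was: list_items(d)
    ordered = dict(items)                               # was: ordered_dict(items); py3.7+ dicts keep insertion order
    wanted = list(filter(lambda x: 1 < x, d.values()))  # was: filter_list(...)
    doubled = list(map(lambda x: 2 * x, d.values()))    # was: map_list(...)
    lazy = filter(None, d.values())                     # was: filter_iter(...); a one-off iterator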
Prinz23 2023-02-11 18:02:58 +00:00 committed by JackDandy
parent 0989a51e85
commit 8ddffb7882
108 changed files with 460 additions and 2923 deletions

View file

@@ -182,7 +182,11 @@ def param(visible=True, rid=None, cache_person=None, cache_char=None, person=Non
 #end if
 #set $section_links = False
+#set $all_sources = $TVInfoAPI().all_sources
 #for $cur_src, $cur_sid in sorted(iteritems($person.ids))
+#if $cur_src not in $all_sources:
+#continue
+#end if
 #if $TVInfoAPI($cur_src).config.get('people_url')
 #if not $section_links
 #set $section_links = True

View file

@@ -13,7 +13,6 @@
 #from sickgear.sgdatetime import *
 <% def sg_var(varname, default=False): return getattr(sickgear, varname, default) %>#slurp#
 <% def sg_str(varname, default=''): return getattr(sickgear, varname, default) %>#slurp#
-#from _23 import list_keys
 ##
 #set global $title = 'Config - General'
 #set global $header = 'General Settings'
@@ -846,7 +845,7 @@
 <span class="component-title">File logging level:</span>
 <span class="component-desc">
 <select id="file_logging_presets" name="file_logging_preset" class="form-control input-sm">
-#set $levels = $list_keys(file_logging_presets)
+#set $levels = $list(file_logging_presets)
 #set void = $levels.sort(key=lambda x: $file_logging_presets[$x])
 #set $level_count = len($levels)
 #for $level in $levels
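
The template edit relies on list(mapping) returning the mapping's keys, which the next line then sorts by preset value; a quick py3 illustration with hypothetical preset levels:

    file_logging_presets = {'ERROR': 40, 'WARNING': 30, 'DEBUG': 10}
    levels = list(file_logging_presets)  # keys only: ['ERROR', 'WARNING', 'DEBUG']
    levels.sort(key=lambda x: file_logging_presets[x])
    print(levels)                        # ['DEBUG', 'WARNING', 'ERROR']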

View file

@@ -1,6 +1,5 @@
 #import sickgear
 #from sickgear.common import Quality, qualityPresets, qualityPresetStrings
-#from _23 import filter_list
 ##
 #set $html_checked = ' checked="checked"'
 #set $html_selected = ' selected="selected"'
@@ -35,7 +34,7 @@
 <span id="wanted-quality" class="component-desc">
 <p>select one or more qualities; the best one found when searching will be used</p>
-#set $any_quality_list = filter_list(lambda x: x > $Quality.NONE and x < $Quality.UNKNOWN, $Quality.qualityStrings)
+#set $any_quality_list = list(filter(lambda x: x > $Quality.NONE and x < $Quality.UNKNOWN, $Quality.qualityStrings))
 #set $has_unknown = False
 #for $cur_quality in sorted($any_quality_list):
 ##set $has_unknown |= ($Quality.UNKNOWN == $cur_quality and $cur_quality in $any_qualities)
@@ -62,7 +61,7 @@
 </div>
 <span id="upgrade-quality" class="component-desc">
 <p>optional, upgrade existing media to any selected quality</p>
-#set $best_quality_list = filter_list(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings)
+#set $best_quality_list = list(filter(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings))
 #for $cur_quality in sorted($best_quality_list):
 <a href="#" data-quality="$cur_quality" class="btn btn-inverse dark-bg#echo ('', ' active')[$cur_quality in $best_qualities]#" role="button"><i class="icon-glyph searchadd"></i>$Quality.get_quality_ui($cur_quality)</a>
 #if $cur_quality in [$Quality.SDDVD, $Quality.FULLHDTV, $Quality.FULLHDBLURAY]
@@ -85,7 +84,7 @@
 <span class="component-desc bfr">
 <div style="float:left;padding-right:28px">
 <h4 class="jumbo">Wanted</h4>
-#set $any_quality_list = filter_list(lambda x: x > $Quality.NONE, $Quality.qualityStrings)
+#set $any_quality_list = list(filter(lambda x: x > $Quality.NONE, $Quality.qualityStrings))
 <select id="wanted-qualities" name="any_qualities" multiple="multiple" size="$len($any_quality_list)" class="form-control form-control-inline input-sm">
 #for $cur_quality in sorted($any_quality_list):
@@ -96,7 +95,7 @@
 <div style="float:left;padding-right:20px">
 <h4 class="jumbo">Upgrade to</h4>
-#set $best_quality_list = filter_list(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings)
+#set $best_quality_list = list(filter(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings))
 <select id="upgrade-qualities" name="best_qualities" multiple="multiple" size="$len($best_quality_list)" class="form-control form-control-inline input-sm">
 #for $cur_quality in sorted($best_quality_list):
 <option value="$cur_quality"#echo ('', $html_selected)[$cur_quality in $best_qualities]#>$Quality.get_quality_ui($cur_quality)</option>

View file

@@ -2,7 +2,6 @@
 #from sickgear.common import Quality, qualityPresets, qualityPresetStrings, SD
 #from sickgear.indexers.indexer_config import TVINFO_TVMAZE, TVINFO_TVDB
 #from lib import exceptions_helper as exceptions
-#from _23 import filter_list
 <% def sg_var(varname, default=False): return getattr(sickgear, varname, default) %>#slurp#
 <% def sg_str(varname, default=''): return getattr(sickgear, varname, default) %>#slurp#
 ##
@@ -69,7 +68,7 @@
 <div id="custom-quality" class="show-if-quality-custom">
 <div class="manageCustom pull-left">
 <h4 style="font-size:14px">Initial</h4>
-#set $anyQualityList = filter_list(lambda x: x > $Quality.NONE, $Quality.qualityStrings)
+#set $anyQualityList = list(filter(lambda x: x > $Quality.NONE, $Quality.qualityStrings))
 <select id="wanted-qualities" name="any_qualities" multiple="multiple" size="$len($anyQualityList)">
 #for $curQuality in sorted($anyQualityList):
 <option value="$curQuality" #if $curQuality in $anyQualities then $selected else ''#>$Quality.get_quality_ui($curQuality)</option>
@@ -78,7 +77,7 @@
 </div>
 <div class="manageCustom pull-left">
 <h4 style="font-size:14px">Upgrade to</h4>
-#set $bestQualityList = filter_list(lambda x: x > $Quality.SDTV, $Quality.qualityStrings)
+#set $bestQualityList = list(filter(lambda x: x > $Quality.SDTV, $Quality.qualityStrings))
 <select id="upgrade-qualities" name="best_qualities" multiple="multiple" size="$len($bestQualityList)">
 #for $curQuality in sorted($bestQualityList):
 <option value="$curQuality" #if $curQuality in $bestQualities then $selected else ''#>$Quality.get_quality_ui($curQuality)</option>

View file

@@ -3,7 +3,6 @@
 #from sickgear.common import *
 #from sickgear.logger import reverseNames
 #from sickgear.helpers import maybe_plural
-#from _23 import list_keys
 ##
 #set global $header = 'Log File'
 #set global $title = 'Logs'
@@ -23,7 +22,7 @@
 <div class="h2footer pull-right">
 <select name="minLevel" id="minLevel" class="form-control form-control-inline input-sm pull-right">
-#set $levels = $list_keys($reverseNames)
+#set $levels = $list($reverseNames)
 #set void = $levels.sort(key=lambda x: $reverseNames[$x])
 #set $level_count = len($levels)
 #for $level in $levels

View file

@@ -19,8 +19,8 @@ import datetime
 from collections import deque
 from itertools import islice
 from sys import version_info
-from six import binary_type, moves
+from base64 import encodebytes as b64encodebytes
 # noinspection PyUnresolvedReferences
 from six.moves.urllib.parse import quote, quote_plus, unquote as six_unquote, unquote_plus as six_unquote_plus, \
     urlencode, urlsplit, urlunparse, urlunsplit
@@ -42,15 +42,11 @@ if False:
 PY38 = version_info[0:2] >= (3, 8)

-""" one off consumables (Iterators) """
-filter_iter = moves.filter  # type: Callable[[Callable, Iterable], Iterator]
-map_iter = moves.map  # type: Callable[[Callable, ...], Iterator]

 def map_consume(*args):
     # type: (...) -> None
     """Run a lambda over elements without returning anything"""
-    deque(moves.map(*args), maxlen=0)
+    deque(map(*args), maxlen=0)


 def consume(iterator, n=None):
@@ -76,7 +72,7 @@ def consume(iterator, n=None):
 def decode_str(s, encoding='utf-8', errors=None):
     # type: (...) -> AnyStr
-    if isinstance(s, binary_type):
+    if isinstance(s, bytes):
         if None is errors:
             return s.decode(encoding)
         return s.decode(encoding, errors)
@@ -99,7 +95,7 @@ def html_unescape(s):
 def list_range(*args, **kwargs):
     # type: (...) -> List
-    return list(moves.range(*args, **kwargs))
+    return list(range(*args, **kwargs))


 def urlparse(url, scheme='', allow_fragments=True):
@@ -135,181 +131,45 @@ def b64encodestring(s, keep_eol=False):
     return data.rstrip()

-if 2 != version_info[0]:
-    # ---------
-    # Python 3+
-    # ---------
-    # noinspection PyUnresolvedReferences,PyProtectedMember
-    from base64 import decodebytes, encodebytes
-    b64decodebytes = decodebytes
-    b64encodebytes = encodebytes
-    # noinspection PyUnresolvedReferences,PyCompatibility
-    from configparser import ConfigParser
-    # noinspection PyUnresolvedReferences
-    from enum import Enum
-    # noinspection PyUnresolvedReferences
-    from os import scandir, DirEntry
-    # noinspection PyUnresolvedReferences
-    from itertools import zip_longest
-    # noinspection PyUnresolvedReferences
-    from inspect import getfullargspec as getargspec
-    # noinspection PyUnresolvedReferences
-    from subprocess import Popen
-    # noinspection PyUnresolvedReferences, PyPep8Naming
-    import xml.etree.ElementTree as etree
-
-    ordered_dict = dict
-    native_timestamp = datetime.datetime.timestamp  # type: Callable[[datetime.datetime], float]
-
-    def unquote(string, encoding='utf-8', errors='replace'):
-        return decode_str(six_unquote(decode_str(string, encoding, errors), encoding=encoding, errors=errors),
-                          encoding, errors)
-
-    def unquote_plus(string, encoding='utf-8', errors='replace'):
-        return decode_str(six_unquote_plus(decode_str(string, encoding, errors), encoding=encoding, errors=errors),
-                          encoding, errors)
-
-    def decode_bytes(d, encoding='utf-8', errors='replace'):
-        if not isinstance(d, binary_type):
-            # noinspection PyArgumentList
-            return bytes(d, encoding=encoding, errors=errors)
-        return d
-
-    def filter_list(*args):
-        # type: (...) -> List
-        return list(filter(*args))
-
-    def list_items(d):
-        # type: (Dict) -> List[Tuple[Any, Any]]
-        """
-        equivalent to python 2 .items()
-        """
-        return list(d.items())
-
-    def list_keys(d):
-        # type: (Dict) -> List
-        """
-        equivalent to python 2 .keys()
-        """
-        return list(d)
-
-    def list_values(d):
-        # type: (Dict) -> List
-        """
-        equivalent to python 2 .values()
-        """
-        return list(d.values())
-
-    def map_list(*args):
-        # type: (...) -> List
-        return list(map(*args))
-
-    def map_none(*args):
-        # type: (...) -> List
-        return list(zip_longest(*args))
-
-    def unidecode(data):
-        # type: (AnyStr) -> AnyStr
-        return data
-else:
-    # ---------
-    # Python 2
-    # ---------
-    import time
-    from lib.unidecode import unidecode as unicode_decode
-    # noinspection PyProtectedMember,PyDeprecation
-    from base64 import decodestring, encodestring
-    # noinspection PyDeprecation
-    b64decodebytes = decodestring
-    # noinspection PyDeprecation
-    b64encodebytes = encodestring
-    # noinspection PyUnresolvedReferences
-    from lib.backports.configparser import ConfigParser
-    # noinspection PyUnresolvedReferences
-    from lib.enum34 import Enum
-    # noinspection PyProtectedMember,PyUnresolvedReferences
-    from lib.scandir.scandir import scandir, GenericDirEntry as DirEntry
-    # noinspection PyUnresolvedReferences,PyDeprecation
-    from inspect import getargspec
-
-    try:
-        # noinspection PyPep8Naming
-        import xml.etree.cElementTree as etree
-    except ImportError:
-        # noinspection PyPep8Naming
-        import xml.etree.ElementTree as etree
-
-    from collections import OrderedDict
-    ordered_dict = OrderedDict
-
-    def _totimestamp(dt=None):
-        # type: (datetime.datetime) -> float
-        """ This function should only be used in this module due to its 1970s+ limitation as that's all we need here and
-        sgdatatime can't be used at this module level
-        """
-        return time.mktime(dt.timetuple())
-
-    native_timestamp = _totimestamp  # type: Callable[[datetime.datetime], float]
-
-    from subprocess import Popen as _Popen
-
-    class Popen(_Popen):
-        def __enter__(self):
-            return self
-
-        def __exit__(self, *args, **kwargs):
-            for x in filter_iter(lambda y: y, [self.stdout, self.stderr, self.stdin]):
-                x.close()
-            self.wait()
-
-    def unquote(string, encoding='utf-8', errors='replace'):
-        return decode_str(six_unquote(decode_str(string, encoding, errors)), encoding, errors)
-
-    def unquote_plus(string, encoding='utf-8', errors='replace'):
-        return decode_str(six_unquote_plus(decode_str(string, encoding, errors)), encoding, errors)
-
-    # noinspection PyUnusedLocal
-    def decode_bytes(d, encoding='utf-8', errors='replace'):
-        if not isinstance(d, binary_type):
-            return bytes(d)
-        return d
-
-    def filter_list(*args):
-        # type: (...) -> List
-        # noinspection PyTypeChecker
-        return filter(*args)
-
-    def list_items(d):
-        # type: (Dict) -> List[Tuple[Any, Any]]
-        # noinspection PyTypeChecker
-        return d.items()
-
-    def list_keys(d):
-        # type: (Dict) -> List
-        # noinspection PyTypeChecker
-        return d.keys()
-
-    def list_values(d):
-        # type: (Dict) -> List
-        # noinspection PyTypeChecker
-        return d.values()
-
-    def map_list(*args):
-        # type: (...) -> List
-        # noinspection PyTypeChecker
-        return map(*args)
-
-    def map_none(*args):
-        # type: (...) -> List
-        # noinspection PyTypeChecker
-        return map(None, *args)
-
-    def unidecode(data):
-        # type: (AnyStr) -> AnyStr
-        # noinspection PyUnresolvedReferences
-        return isinstance(data, unicode) and unicode_decode(data) or data
+# noinspection PyUnresolvedReferences,PyProtectedMember
+# noinspection PyUnresolvedReferences,PyCompatibility
+from configparser import ConfigParser
+# noinspection PyUnresolvedReferences
+from enum import Enum
+# noinspection PyUnresolvedReferences
+from os import scandir, DirEntry
+# noinspection PyUnresolvedReferences
+from itertools import zip_longest
+# noinspection PyUnresolvedReferences
+from inspect import getfullargspec as getargspec
+# noinspection PyUnresolvedReferences
+from subprocess import Popen
+# noinspection PyUnresolvedReferences, PyPep8Naming
+import xml.etree.ElementTree as etree
+
+native_timestamp = datetime.datetime.timestamp  # type: Callable[[datetime.datetime], float]
+
+
+def unquote(string, encoding='utf-8', errors='replace'):
+    return decode_str(six_unquote(decode_str(string, encoding, errors), encoding=encoding, errors=errors),
+                      encoding, errors)
+
+
+def unquote_plus(string, encoding='utf-8', errors='replace'):
+    return decode_str(six_unquote_plus(decode_str(string, encoding, errors), encoding=encoding, errors=errors),
+                      encoding, errors)
+
+
+def decode_bytes(d, encoding='utf-8', errors='replace'):
+    if not isinstance(d, bytes):
+        # noinspection PyArgumentList
+        return bytes(d, encoding=encoding, errors=errors)
+    return d
+
+
+def map_none(*args):
+    # type: (...) -> List
+    return list(zip_longest(*args))
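
A short usage sketch of the retained consumer helper shown above (the map_consume body is reproduced from the hunk; the sample data is made up):

    from collections import deque

    def map_consume(*args):
        """Run a lambda over elements without returning anything."""
        deque(map(*args), maxlen=0)  # maxlen=0 drains the iterator without storing results

    seen = []
    map_consume(seen.append, range(3))  # called for side effects only
    assert seen == [0, 1, 2]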

View file

@@ -21,7 +21,6 @@ from lib.tvinfo_base import CastList, PersonGenders, RoleTypes, \
 from json_helper import json_dumps
 from sg_helpers import clean_data, get_url, iterate_chunk, try_int
-from _23 import filter_list
 from six import iteritems

 # noinspection PyUnreachableCode
@@ -682,12 +681,12 @@ class TmdbIndexer(TVInfoBase):
                 season_cast_obj['id'] for season_cast_obj in
                 season_data[season_obj[0]].get('cast') or []])
-            for person_obj in sorted(filter_list(lambda a: a['id'] in main_cast_ids,
-                                                 show_data['aggregate_credits']['cast'] or [])[:50],
+            for person_obj in sorted(list(filter(lambda a: a['id'] in main_cast_ids,
+                                                 show_data['aggregate_credits']['cast'] or []))[:50],
                                      key=lambda c: (main_cast_ids.get(c['id'], 0) or 0,
                                                     c['total_episode_count'], c['order'] * -1), reverse=True):
-                for character in sorted(filter_list(lambda b: b['credit_id'] in main_cast_credit_ids,
-                                                    person_obj.get('roles', []) or []),
+                for character in sorted(list(filter(lambda b: b['credit_id'] in main_cast_credit_ids,
+                                                    person_obj.get('roles', []) or [])),
                                         key=lambda c: c['episode_count'], reverse=True):
                     character_obj = TVInfoCharacter(
                         name=clean_data(character['character']),
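
The extra list(...) ahead of the [:50] slice is not just style: py3 filter returns a lazy iterator that does not support slicing, so it must be materialized first. A minimal illustration (plain numbers, not SickGear data):

    from itertools import islice

    lazy = filter(lambda n: n % 2, range(100))
    # lazy[:3] would raise TypeError: 'filter' object is not subscriptable
    first_three = list(islice(lazy, 3))                          # lazy alternative to a slice
    also_three = list(filter(lambda n: n % 2, range(100)))[:3]   # materialize then slice, as above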

View file

@@ -39,7 +39,6 @@ from lib.tvinfo_base import CastList, TVInfoCharacter, CrewList, TVInfoPerson, R
 from .tvdb_exceptions import TvdbError, TvdbShownotfound, TvdbTokenexpired
 from .tvdb_ui import BaseUI, ConsoleUI
-from _23 import filter_list, list_keys, list_values, map_list
 from six import integer_types, iteritems, PY2, string_types

 # noinspection PyUnreachableCode
@@ -290,7 +289,7 @@ class Tvdb(TVInfoBase):
             'nl': 'nld', 'no': 'nor',
             'pl': 'pol', 'pt': 'pot', 'ru': 'rus', 'sk': 'slv', 'sv': 'swe', 'zh': 'zho', '_1': 'srp',
         }
-        self.config['valid_languages_3'] = list_values(self.config['langabbv_23'])
+        self.config['valid_languages_3'] = list(self.config['langabbv_23'].values())

         # TheTvdb.com should be based around numeric language codes,
         # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
@@ -358,7 +357,7 @@ class Tvdb(TVInfoBase):
                 else:
                     d_m = shows
                 if d_m:
-                    results = map_list(map_data, [d_m['data']])
+                    results = list(map(map_data, [d_m['data']]))
         if ids.get(TVINFO_TVDB_SLUG):
             cache_id_key = 's-id-%s-%s' % (TVINFO_TVDB, ids[TVINFO_TVDB_SLUG])
             is_none, shows = self._get_cache_entry(cache_id_key)
@@ -373,7 +372,7 @@ class Tvdb(TVInfoBase):
                 if d_m:
                     for r in d_m:
                         if ids.get(TVINFO_TVDB_SLUG) == r['slug']:
-                            results = map_list(map_data, [r])
+                            results = list(map(map_data, [r]))
                             break
         if name:
             for n in ([name], name)[isinstance(name, list)]:
@@ -390,7 +389,7 @@ class Tvdb(TVInfoBase):
                 if r:
                     if not isinstance(r, list):
                         r = [r]
-                    results.extend(map_list(map_data, r))
+                    results.extend(list(map(map_data, r)))

         seen = set()
         results = [seen.add(r['id']) or r for r in results if r['id'] not in seen]
@@ -613,8 +612,8 @@ class Tvdb(TVInfoBase):
         # type: (int, Optional[str]) -> Optional[dict]
         results = self.search_tvs(sid, language=language)
         for cur_result in (isinstance(results, dict) and results.get('results') or []):
-            result = filter_list(lambda r: 'series' == r['type'] and sid == r['id'],
-                                 cur_result.get('nbHits') and cur_result.get('hits') or [])
+            result = list(filter(lambda r: 'series' == r['type'] and sid == r['id'],
+                                 cur_result.get('nbHits') and cur_result.get('hits') or []))
             if 1 == len(result):
                 result[0]['overview'] = self.clean_overview(
                     result[0]['overviews'][self.config['langabbv_23'].get(language) or 'eng'])
@@ -627,7 +626,7 @@ class Tvdb(TVInfoBase):
             # notify of new keys
             if ENV.get('SG_DEV_MODE'):
-                new_keys = set(list_keys(result[0])).difference({
+                new_keys = set(list(result[0])).difference({
                     '_highlightResult', 'aliases', 'banner',
                     'fanart', 'firstaired', 'follower_count',
                     'id', 'image', 'is_tvdb_searchable', 'is_tvt_searchable',
@@ -788,7 +787,7 @@ class Tvdb(TVInfoBase):
             series_found = self._getetsrc(self.config['url_search_series'], params=self.config['params_search_series'],
                                           language=self.config['language'])
             if series_found:
-                return list_values(series_found)[0]
+                return list(series_found.values())[0]
         except (BaseException, Exception):
             pass
@@ -899,15 +898,15 @@ class Tvdb(TVInfoBase):
         try:
             for cur_result in (isinstance(results, dict) and results.get('results') or []):
                 # sorts 'banners/images/missing/' to last before filter
-                people = filter_list(
+                people = list(filter(
                     lambda r: 'person' == r['type']
                     and rc_clean.sub(name, '') == rc_clean.sub(r['name'], ''),
                     cur_result.get('nbHits')
                     and sorted(cur_result.get('hits'),
-                               key=lambda x: len(x['image']), reverse=True) or [])
+                               key=lambda x: len(x['image']), reverse=True) or []))
                 if ENV.get('SG_DEV_MODE'):
                     for person in people:
-                        new_keys = set(list_keys(person)).difference({
+                        new_keys = set(list(person)).difference({
                             '_highlightResult', 'banner', 'id', 'image',
                             'is_tvdb_searchable', 'is_tvt_searchable', 'name',
                             'objectID', 'people_birthdate', 'people_died',

View file

@@ -27,7 +27,6 @@ from lib.tvinfo_base import TVInfoBase, TVInfoImage, TVInfoImageSize, TVInfoImag
     crew_type_names, TVInfoPerson, RoleTypes, TVInfoShow, TVInfoEpisode, TVInfoIDs, TVInfoNetwork, TVInfoSeason, \
     PersonGenders, TVINFO_TVMAZE, TVINFO_TVDB, TVINFO_IMDB
-from _23 import filter_iter
 from six import integer_types, iteritems, string_types

 # noinspection PyUnreachableCode
@@ -683,7 +682,7 @@ class TvMaze(TVInfoBase):
         premieres = []
         returning = []
         rc_lang = re.compile('(?i)eng|jap')
-        for cur_show in filter_iter(lambda s: 1 == s.episode_number and (
+        for cur_show in filter(lambda s: 1 == s.episode_number and (
                 None is s.show.language or rc_lang.search(s.show.language)), schedule):
             if 1 == cur_show.season_number:
                 premieres += [cur_show]

View file

@@ -21,7 +21,7 @@ import string
 import re
 import struct
 from six import string_types, integer_types
-from _23 import decode_str, list_items
+from _23 import decode_str

 __all__ = ['resolve']
@@ -845,7 +845,7 @@ FOURCC = {
 }

 # make it fool prove
-for code, value in list_items(FOURCC):
+for code, value in list(FOURCC.items()):
     if not code.upper() in FOURCC:
         FOURCC[code.upper()] = value
     if code.endswith(' '):
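
One subtlety this change preserves: the loop inserts upper-cased keys into FOURCC while iterating, and on py3 mutating a dict during iteration over its live view raises RuntimeError, so list(...) snapshots the items first. A reduced sketch (toy table, not the real FOURCC data):

    fourcc = {'avc1': 'H.264', 'divx': 'DivX'}
    for code, value in list(fourcc.items()):  # snapshot; safe to add keys below
        if code.upper() not in fourcc:
            fourcc[code.upper()] = value
    print(sorted(fourcc))  # ['AVC1', 'DIVX', 'avc1', 'divx']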

View file

@@ -36,8 +36,6 @@ from .rpc import Method
 from .torrent import Torrent, methods as torrent_methods
 from .tracker import Tracker, methods as tracker_methods

-from _23 import filter_iter, filter_list, map_list
-
 __version__ = '0.2.10'
 __author__ = 'Chris Lucas'
@@ -184,15 +182,16 @@ class RTorrent(object):
         @todo: add validity check for specified view
         """
         self.torrents = []
-        retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self), torrent_methods)
+        retriever_methods = list(filter(lambda m: m.is_retriever() and m.is_available(self), torrent_methods))
         mc = rpc.Multicall(self)

         if self.method_exists('d.multicall2'):
             mc.add('d.multicall2', '', view, 'd.hash=',
-                   *map_list(lambda m2: ((getattr(m2, 'aliases') or [''])[-1] or m2.rpc_call) + '=', retriever_methods))
+                   *list(map(lambda m2: ((getattr(m2, 'aliases') or [''])[-1] or m2.rpc_call) + '=',
+                             retriever_methods)))
         else:
             mc.add('d.multicall', view, 'd.get_hash=',
-                   *map_list(lambda m1: m1.rpc_call + '=', retriever_methods))
+                   *list(map(lambda m1: m1.rpc_call + '=', retriever_methods)))

         results = mc.call()[0]  # only sent one call, only need first result
@@ -240,7 +239,7 @@ class RTorrent(object):
             try:
                 call, arg = x.split('=')
                 method = rpc.find_method(call)
-                method_name = next(filter_iter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases))
+                method_name = next(filter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases))
                 param += ['%s=%s' % (method_name, arg)]
             except (BaseException, Exception):
                 pass
@@ -267,7 +266,7 @@ class RTorrent(object):
         max_retries = 10
         while max_retries:
             try:
-                t = next(filter_iter(lambda td: td.info_hash.upper() == info_hash, self.get_torrents()))
+                t = next(filter(lambda td: td.info_hash.upper() == info_hash, self.get_torrents()))
                 break
             except (BaseException, Exception):
                 time.sleep(self.request_interval)
@@ -326,7 +325,7 @@ class RTorrent(object):
         if verify_load:
             while verify_retries:
                 try:
-                    t = next(filter_iter(lambda td: td.info_hash == info_hash, self.get_torrents()))
+                    t = next(filter(lambda td: td.info_hash == info_hash, self.get_torrents()))
                     break
                 except (BaseException, Exception):
                     time.sleep(self.request_interval)
@@ -437,7 +436,7 @@ class RTorrent(object):
         method = rpc.find_method('d.get_local_id')
         result = True
         try:
-            func = next(filter_iter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases))
+            func = next(filter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases))
             getattr(self.get_connection(), func)(info_hash)
         except (BaseException, Exception):
             result = False
@@ -466,7 +465,7 @@ class RTorrent(object):
         """
         mc = rpc.Multicall(self)

-        for method in filter_iter(lambda m: m.is_retriever() and m.is_available(self), methods):
+        for method in filter(lambda m: m.is_retriever() and m.is_available(self), methods):
             mc.add(method)

         mc.call()
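
The recurring next(filter(...)) pattern above is the py3 idiom for "first match": filter is lazy, so next() stops at the first hit, and a missing match raises StopIteration, which the surrounding try/except absorbs. A standalone sketch with made-up method names:

    available = {'d.multicall', 'd.hash'}
    candidates = ('d.multicall2', 'd.multicall')

    first = next(filter(lambda m: m in available, candidates))   # 'd.multicall'
    fallback = next(filter(lambda m: m in available, ()), None)  # a default dodges StopIteration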

View file

@@ -22,8 +22,6 @@ from . import rpc
 from .common import safe_repr
 from .rpc import Method

-from _23 import filter_iter
-

 class File(object):
     """Represents an individual file within a L{Torrent} instance."""
@@ -48,7 +46,7 @@ class File(object):
         """
         mc = rpc.Multicall(self)

-        for method in filter_iter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods):
+        for method in filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods):
             mc.add(method, self.rpc_id)

         mc.call()

View file

@@ -21,8 +21,6 @@
 from . import rpc
 from .rpc import Method

-from _23 import filter_iter
-

 class Group(object):
     __name__ = 'Group'
@@ -72,7 +70,7 @@ class Group(object):
     def _get_method(self, *choices):
         try:
-            return next(filter_iter(lambda method: self._rt_obj.method_exists(method), choices))
+            return next(filter(lambda method: self._rt_obj.method_exists(method), choices))
         except (BaseException, Exception):
             pass

View file

@@ -27,8 +27,6 @@ import re

 import rtorrent

-from _23 import filter_iter, map_list
-

 def get_varname(rpc_call):
     """Transform rpc method into variable name.
@@ -94,8 +92,8 @@ class Method(object):
         if rt_obj.get_client_version_tuple() >= self.min_version:
             try:
-                self.varname = get_varname(next(filter_iter(lambda f: rt_obj.method_exists(f),
-                                                            (self.rpc_call,) + tuple(getattr(self, 'aliases', '')))))
+                self.varname = get_varname(next(filter(lambda f: rt_obj.method_exists(f),
+                                                       (self.rpc_call,) + tuple(getattr(self, 'aliases', '')))))
                 return True
             except (BaseException, Exception):
                 pass
@@ -162,7 +160,7 @@ class Multicall(object):
             getattr(xmc, rpc_call)(*args)

         try:
-            results = tuple(next(filter_iter(lambda x: isinstance(x, list), xmc().results)))
+            results = tuple(next(filter(lambda x: isinstance(x, list), xmc().results)))
         except (BaseException, Exception):
             return [[]]
@@ -216,8 +214,8 @@ def find_method(rpc_call):
     """Return L{Method} instance associated with given RPC call"""
     try:
         rpc_call = rpc_call.lower()
-        return next(filter_iter(lambda m: rpc_call in map_list(
-            lambda n: n.lower(), [m.rpc_call] + list(getattr(m, 'aliases', []))),
+        return next(filter(lambda m: rpc_call in list(map(
+            lambda n: n.lower(), [m.rpc_call] + list(getattr(m, 'aliases', [])))),
             rtorrent.methods + rtorrent.torrent.methods +
             rtorrent.file.methods + rtorrent.tracker.methods + rtorrent.peer.methods))
     except (BaseException, Exception):

View file

@@ -25,8 +25,6 @@ from .peer import Peer, methods as peer_methods
 from .rpc import Method
 from .tracker import Tracker, methods as tracker_methods

-from _23 import filter_iter, filter_list
-

 class Torrent(object):
     """Represents an individual torrent within a L{RTorrent} instance."""
@@ -70,7 +68,7 @@ class Torrent(object):
         @note: also assigns return value to self.peers
         """
         self.peers = []
-        retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self._rt_obj), peer_methods)
+        retriever_methods = list(filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), peer_methods))
         mc = rpc.Multicall(self)

         # need to leave 2nd arg empty (dunno why)
@@ -97,7 +95,7 @@ class Torrent(object):
         @note: also assigns return value to self.trackers
         """
         self.trackers = []
-        retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self._rt_obj), tracker_methods)
+        retriever_methods = list(filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), tracker_methods))
         mc = rpc.Multicall(self)

         # need to leave 2nd arg empty (dunno why)
@@ -125,7 +123,7 @@ class Torrent(object):
         """
         self.files = []
-        retriever_methods = filter_list(lambda m: m.is_retriever() and m.is_available(self._rt_obj), file_methods)
+        retriever_methods = list(filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), file_methods))
         mc = rpc.Multicall(self)

         # 2nd arg can be anything, but it'll return all files in torrent
@@ -155,7 +153,7 @@ class Torrent(object):
     def _get_method(self, *choices):
         try:
-            return next(filter_iter(lambda method: self._rt_obj.method_exists(method), choices))
+            return next(filter(lambda method: self._rt_obj.method_exists(method), choices))
         except (BaseException, Exception):
             pass
@@ -276,7 +274,7 @@ class Torrent(object):
         """
         mc = rpc.Multicall(self)

-        for method in filter_iter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods):
+        for method in filter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods):
             mc.add(method, self.rpc_id)

         mc.call()

View file

@@ -22,8 +22,6 @@ from . import rpc
 from .common import safe_repr
 from .rpc import Method

-from _23 import filter_iter
-

 class Tracker(object):
     """Represents an individual tracker within a L{Torrent} instance."""
@@ -64,7 +62,7 @@ class Tracker(object):
         """
         mc = rpc.Multicall(self)

-        for method in filter_iter(lambda fx: fx.is_retriever() and fx.is_available(self._rt_obj), methods):
+        for method in filter(lambda fx: fx.is_retriever() and fx.is_available(self._rt_obj), methods):
             mc.add(method, self.rpc_id)

         mc.call()

View file

@@ -1,27 +0,0 @@
Copyright (c) 2012, Ben Hoyt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Ben Hoyt nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -1,697 +0,0 @@
"""scandir, a better directory iterator and faster os.walk(), now in the Python 3.5 stdlib
scandir() is a generator version of os.listdir() that returns an
iterator over files in a directory, and also exposes the extra
information most OSes provide while iterating files in a directory
(such as type and stat information).
This module also includes a version of os.walk() that uses scandir()
to speed it up significantly.
See README.md or https://github.com/benhoyt/scandir for rationale and
docs, or read PEP 471 (https://www.python.org/dev/peps/pep-0471/) for
more details on its inclusion into Python 3.5
scandir is released under the new BSD 3-clause license. See
LICENSE.txt for the full license text.
"""
from __future__ import division
from errno import ENOENT
from os import listdir, lstat, stat, strerror
from os.path import join, islink
from stat import S_IFDIR, S_IFLNK, S_IFREG
import collections
import sys
try:
import _scandir
except ImportError:
_scandir = None
try:
import ctypes
except ImportError:
ctypes = None
if None is not _scandir and None is not ctypes and not getattr(_scandir, 'DirEntry', None):
import warnings
warnings.warn("scandir compiled _scandir C module is too old, using slow generic fallback")
_scandir = None
elif _scandir is None and ctypes is None:
import warnings
warnings.warn("scandir can't find the compiled _scandir C module or ctypes, using slow generic fallback")
__version__ = '1.10.0'
__all__ = ['scandir', 'walk']
# Windows FILE_ATTRIBUTE constants for interpreting the
# FIND_DATA.dwFileAttributes member
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DEVICE = 64
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ENCRYPTED = 16384
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_INTEGRITY_STREAM = 32768
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192
FILE_ATTRIBUTE_NO_SCRUB_DATA = 131072
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_VIRTUAL = 65536
IS_PY3 = sys.version_info >= (3, 0)
if IS_PY3:
unicode = str # Because Python <= 3.2 doesn't have u'unicode' syntax
class GenericDirEntry(object):
__slots__ = ('name', '_stat', '_lstat', '_scandir_path', '_path')
def __init__(self, scandir_path, name):
self._scandir_path = scandir_path
self.name = name
self._stat = None
self._lstat = None
self._path = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
self._stat = stat(self.path)
return self._stat
else:
if self._lstat is None:
self._lstat = lstat(self.path)
return self._lstat
# The code duplication below is intentional: this is for slightly
# better performance on systems that fall back to GenericDirEntry.
# It avoids an additional attribute lookup and method call, which
# are relatively slow on CPython.
def is_dir(self, follow_symlinks=True):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFDIR
def is_file(self, follow_symlinks=True):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFREG
def is_symlink(self):
try:
st = self.stat(follow_symlinks=False)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFLNK
def inode(self):
st = self.stat(follow_symlinks=False)
return st.st_ino
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def _scandir_generic(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
for name in listdir(path):
yield GenericDirEntry(path, name)
if IS_PY3 and sys.platform == 'win32':
def scandir_generic(path=unicode('.')):
if isinstance(path, bytes):
raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead")
return _scandir_generic(path)
scandir_generic.__doc__ = _scandir_generic.__doc__
else:
scandir_generic = _scandir_generic
scandir_c = None
scandir_python = None
if sys.platform == 'win32':
if ctypes is not None:
from ctypes import wintypes
# Various constants from windows.h
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
ERROR_FILE_NOT_FOUND = 2
ERROR_NO_MORE_FILES = 18
IO_REPARSE_TAG_SYMLINK = 0xA000000C
# Numer of seconds between 1601-01-01 and 1970-01-01
SECONDS_BETWEEN_EPOCHS = 11644473600
kernel32 = ctypes.windll.kernel32
# ctypes wrappers for (wide string versions of) FindFirstFile,
# FindNextFile, and FindClose
FindFirstFile = kernel32.FindFirstFileW
FindFirstFile.argtypes = [
wintypes.LPCWSTR,
ctypes.POINTER(wintypes.WIN32_FIND_DATAW),
]
FindFirstFile.restype = wintypes.HANDLE
FindNextFile = kernel32.FindNextFileW
FindNextFile.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(wintypes.WIN32_FIND_DATAW),
]
FindNextFile.restype = wintypes.BOOL
FindClose = kernel32.FindClose
FindClose.argtypes = [wintypes.HANDLE]
FindClose.restype = wintypes.BOOL
Win32StatResult = collections.namedtuple('Win32StatResult', [
'st_mode',
'st_ino',
'st_dev',
'st_nlink',
'st_uid',
'st_gid',
'st_size',
'st_atime',
'st_mtime',
'st_ctime',
'st_atime_ns',
'st_mtime_ns',
'st_ctime_ns',
'st_file_attributes',
])
def filetime_to_time(filetime):
"""Convert Win32 FILETIME to time since Unix epoch in seconds."""
total = filetime.dwHighDateTime << 32 | filetime.dwLowDateTime
return total / 10000000 - SECONDS_BETWEEN_EPOCHS
def find_data_to_stat(data):
"""Convert Win32 FIND_DATA struct to stat_result."""
# First convert Win32 dwFileAttributes to st_mode
attributes = data.dwFileAttributes
st_mode = 0
if attributes & FILE_ATTRIBUTE_DIRECTORY:
st_mode |= S_IFDIR | 0o111
else:
st_mode |= S_IFREG
if attributes & FILE_ATTRIBUTE_READONLY:
st_mode |= 0o444
else:
st_mode |= 0o666
if (attributes & FILE_ATTRIBUTE_REPARSE_POINT and
data.dwReserved0 == IO_REPARSE_TAG_SYMLINK):
st_mode ^= st_mode & 0o170000
st_mode |= S_IFLNK
st_size = data.nFileSizeHigh << 32 | data.nFileSizeLow
st_atime = filetime_to_time(data.ftLastAccessTime)
st_mtime = filetime_to_time(data.ftLastWriteTime)
st_ctime = filetime_to_time(data.ftCreationTime)
# Some fields set to zero per CPython's posixmodule.c: st_ino, st_dev,
# st_nlink, st_uid, st_gid
return Win32StatResult(st_mode, 0, 0, 0, 0, 0, st_size,
st_atime, st_mtime, st_ctime,
int(st_atime * 1000000000),
int(st_mtime * 1000000000),
int(st_ctime * 1000000000),
attributes)
class Win32DirEntryPython(object):
__slots__ = ('name', '_stat', '_lstat', '_find_data', '_scandir_path', '_path', '_inode')
def __init__(self, scandir_path, name, find_data):
self._scandir_path = scandir_path
self.name = name
self._stat = None
self._lstat = None
self._find_data = find_data
self._path = None
self._inode = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
if self.is_symlink():
# It's a symlink, call link-following stat()
self._stat = stat(self.path)
else:
# Not a symlink, stat is same as lstat value
if self._lstat is None:
self._lstat = find_data_to_stat(self._find_data)
self._stat = self._lstat
return self._stat
else:
if self._lstat is None:
# Lazily convert to stat object, because it's slow
# in Python, and often we only need is_dir() etc
self._lstat = find_data_to_stat(self._find_data)
return self._lstat
def is_dir(self, follow_symlinks=True):
is_symlink = self.is_symlink()
if follow_symlinks and is_symlink:
try:
return self.stat().st_mode & 0o170000 == S_IFDIR
except OSError as e:
if e.errno != ENOENT:
raise
return False
elif is_symlink:
return False
else:
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_DIRECTORY != 0)
def is_file(self, follow_symlinks=True):
is_symlink = self.is_symlink()
if follow_symlinks and is_symlink:
try:
return self.stat().st_mode & 0o170000 == S_IFREG
except OSError as e:
if e.errno != ENOENT:
raise
return False
elif is_symlink:
return False
else:
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_DIRECTORY == 0)
def is_symlink(self):
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_REPARSE_POINT != 0 and
self._find_data.dwReserved0 == IO_REPARSE_TAG_SYMLINK)
def inode(self):
if self._inode is None:
self._inode = lstat(self.path).st_ino
return self._inode
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def win_error(error, filename):
exc = WindowsError(error, ctypes.FormatError(error))
exc.filename = filename
return exc
def _scandir_python(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
# Call FindFirstFile and handle errors
if isinstance(path, bytes):
is_bytes = True
filename = join(path.decode('mbcs', 'strict'), '*.*')
else:
is_bytes = False
filename = join(path, '*.*')
data = wintypes.WIN32_FIND_DATAW()
data_p = ctypes.byref(data)
handle = FindFirstFile(filename, data_p)
if handle == INVALID_HANDLE_VALUE:
error = ctypes.GetLastError()
if error == ERROR_FILE_NOT_FOUND:
# No files, don't yield anything
return
raise win_error(error, path)
# Call FindNextFile in a loop, stopping when no more files
try:
while True:
# Skip '.' and '..' (current and parent directory), but
# otherwise yield (filename, stat_result) tuple
name = data.cFileName
if name not in ('.', '..'):
if is_bytes:
name = name.encode('mbcs', 'replace')
yield Win32DirEntryPython(path, name, data)
data = wintypes.WIN32_FIND_DATAW()
data_p = ctypes.byref(data)
success = FindNextFile(handle, data_p)
if not success:
error = ctypes.GetLastError()
if error == ERROR_NO_MORE_FILES:
break
raise win_error(error, path)
finally:
if not FindClose(handle):
raise win_error(ctypes.GetLastError(), path)
if IS_PY3:
def scandir_python(path=unicode('.')):
if isinstance(path, bytes):
raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead")
return _scandir_python(path)
scandir_python.__doc__ = _scandir_python.__doc__
else:
scandir_python = _scandir_python
if _scandir is not None:
scandir_c = _scandir.scandir
DirEntry_c = _scandir.DirEntry
if _scandir is not None:
scandir = scandir_c
DirEntry = DirEntry_c
elif ctypes is not None:
scandir = scandir_python
DirEntry = Win32DirEntryPython
else:
scandir = scandir_generic
DirEntry = GenericDirEntry
# Linux, OS X, and BSD implementation
elif sys.platform.startswith(('linux', 'darwin', 'sunos5')) or 'bsd' in sys.platform:
have_dirent_d_type = (sys.platform != 'sunos5')
if ctypes is not None and have_dirent_d_type:
import ctypes.util
DIR_p = ctypes.c_void_p
# Rather annoying how the dirent struct is slightly different on each
# platform. The only fields we care about are d_name and d_type.
class Dirent(ctypes.Structure):
if sys.platform.startswith('linux'):
_fields_ = (
('d_ino', ctypes.c_ulong),
('d_off', ctypes.c_long),
('d_reclen', ctypes.c_ushort),
('d_type', ctypes.c_byte),
('d_name', ctypes.c_char * 256),
)
elif 'openbsd' in sys.platform:
_fields_ = (
('d_ino', ctypes.c_uint64),
('d_off', ctypes.c_uint64),
('d_reclen', ctypes.c_uint16),
('d_type', ctypes.c_uint8),
('d_namlen', ctypes.c_uint8),
('__d_padding', ctypes.c_uint8 * 4),
('d_name', ctypes.c_char * 256),
)
else:
_fields_ = (
('d_ino', ctypes.c_uint32), # must be uint32, not ulong
('d_reclen', ctypes.c_ushort),
('d_type', ctypes.c_byte),
('d_namlen', ctypes.c_byte),
('d_name', ctypes.c_char * 256),
)
DT_UNKNOWN = 0
DT_DIR = 4
DT_REG = 8
DT_LNK = 10
Dirent_p = ctypes.POINTER(Dirent)
Dirent_pp = ctypes.POINTER(Dirent_p)
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
opendir = libc.opendir
opendir.argtypes = [ctypes.c_char_p]
opendir.restype = DIR_p
readdir_r = libc.readdir_r
readdir_r.argtypes = [DIR_p, Dirent_p, Dirent_pp]
readdir_r.restype = ctypes.c_int
closedir = libc.closedir
closedir.argtypes = [DIR_p]
closedir.restype = ctypes.c_int
file_system_encoding = sys.getfilesystemencoding()
class PosixDirEntry(object):
__slots__ = ('name', '_d_type', '_stat', '_lstat', '_scandir_path', '_path', '_inode')
def __init__(self, scandir_path, name, d_type, inode):
self._scandir_path = scandir_path
self.name = name
self._d_type = d_type
self._inode = inode
self._stat = None
self._lstat = None
self._path = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
if self.is_symlink():
self._stat = stat(self.path)
else:
if self._lstat is None:
self._lstat = lstat(self.path)
self._stat = self._lstat
return self._stat
else:
if self._lstat is None:
self._lstat = lstat(self.path)
return self._lstat
def is_dir(self, follow_symlinks=True):
if (self._d_type == DT_UNKNOWN or
(follow_symlinks and self.is_symlink())):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFDIR
else:
return self._d_type == DT_DIR
def is_file(self, follow_symlinks=True):
if (self._d_type == DT_UNKNOWN or
(follow_symlinks and self.is_symlink())):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFREG
else:
return self._d_type == DT_REG
def is_symlink(self):
if self._d_type == DT_UNKNOWN:
try:
st = self.stat(follow_symlinks=False)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFLNK
else:
return self._d_type == DT_LNK
def inode(self):
return self._inode
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def posix_error(filename):
errno = ctypes.get_errno()
exc = OSError(errno, strerror(errno))
exc.filename = filename
return exc
def scandir_python(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
if isinstance(path, bytes):
opendir_path = path
is_bytes = True
else:
opendir_path = path.encode(file_system_encoding)
is_bytes = False
dir_p = opendir(opendir_path)
if not dir_p:
raise posix_error(path)
try:
result = Dirent_p()
while True:
entry = Dirent()
if readdir_r(dir_p, entry, result):
raise posix_error(path)
if not result:
break
name = entry.d_name
if name not in (b'.', b'..'):
if not is_bytes:
name = name.decode(file_system_encoding)
yield PosixDirEntry(path, name, entry.d_type, entry.d_ino)
finally:
if closedir(dir_p):
raise posix_error(path)
if _scandir is not None:
scandir_c = _scandir.scandir
DirEntry_c = _scandir.DirEntry
if _scandir is not None:
scandir = scandir_c
DirEntry = DirEntry_c
elif ctypes is not None and have_dirent_d_type:
scandir = scandir_python
DirEntry = PosixDirEntry
else:
scandir = scandir_generic
DirEntry = GenericDirEntry
# Some other system -- no d_type or stat information
else:
scandir = scandir_generic
DirEntry = GenericDirEntry
def _walk(top, topdown=True, onerror=None, followlinks=False):
"""Like Python 3.5's implementation of os.walk() -- faster than
the pre-Python 3.5 version as it uses scandir() internally.
"""
dirs = []
nondirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
# a directory, same behaviour than os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
                    # entry is not a symbolic link, same behaviour as
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
for entry in walk(entry.path, topdown, onerror, followlinks):
yield entry
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
for name in dirs:
new_path = join(top, name)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
for entry in walk(new_path, topdown, onerror, followlinks):
yield entry
else:
# Yield after recursion if going bottom up
yield top, dirs, nondirs
if IS_PY3 or sys.platform != 'win32':
walk = _walk
else:
# Fix for broken unicode handling on Windows on Python 2.x, see:
# https://github.com/benhoyt/scandir/issues/54
file_system_encoding = sys.getfilesystemencoding()
def walk(top, topdown=True, onerror=None, followlinks=False):
if isinstance(top, bytes):
top = top.decode(file_system_encoding)
return _walk(top, topdown, onerror, followlinks)
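
With py3 as the minimum, none of the above needs vendoring: os.scandir and os.walk cover the same ground natively (os.walk has been scandir-based since Python 3.5). A minimal sketch of the native replacements:

import os

def list_subdirs(top='.'):
    # os.DirEntry caches type info from the OS, so is_dir() usually
    # avoids the extra stat() call this module worked to skip
    with os.scandir(top) as it:
        return sorted(entry.name for entry in it if entry.is_dir(follow_symlinks=False))

for root, dirs, files in os.walk('.'):  # same (top, dirs, nondirs) contract as _walk() above
    pass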

View file

@@ -14,9 +14,5 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with SickGear.  If not, see <http://www.gnu.org/licenses/>.
-import sys
-
-if 2 == sys.version_info[0]:
-    from .py2 import *
-else:
-    from .py3 import *
+from .py3 import *

View file

@@ -1,13 +1,8 @@
 import re
-import sys
 import threading
 
-if 2 == sys.version_info[0]:
-    # noinspection PyProtectedMember
-    from .futures.thread import _WorkItem
-else:
-    # noinspection PyCompatibility,PyProtectedMember
-    from concurrent.futures.thread import _WorkItem
+# noinspection PyProtectedMember,PyUnresolvedReferences
+from concurrent.futures.thread import _WorkItem
 
 
 class GenericWorkItem(_WorkItem):

View file

@ -1,23 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
from ._base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
from .thread import ThreadPoolExecutor
try:
from .process import ProcessPoolExecutor
except ImportError:
# some platforms don't have multiprocessing
pass

View file

@ -1,673 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import collections
import logging
import threading
import itertools
import time
import types
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def _yield_finished_futures(fs, waiter, ref_collect):
"""
Iterate on the list *fs*, yielding finished futures one by one in
reverse order.
Before yielding a future, *waiter* is removed from its waiters
and the future is removed from each set in the collection of sets
*ref_collect*.
The aim of this function is to avoid keeping stale references after
the future is yielded and before the iterator resumes.
"""
while fs:
f = fs[-1]
for futures_set in ref_collect:
futures_set.remove(f)
with f._condition:
f._waiters.remove(waiter)
del f
# Careful not to keep a reference to the popped value
yield fs.pop()
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = set(fs)
total_futures = len(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
finished = list(finished)
try:
for f in _yield_finished_futures(finished, waiter,
ref_collect=(fs,)):
f = [f]
yield f.pop()
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), total_futures))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
# reverse to keep finishing order
finished.reverse()
for f in _yield_finished_futures(finished, waiter,
ref_collect=(fs, pending)):
f = [f]
yield f.pop()
finally:
# Remove waiter from unfinished futures
for f in fs:
with f._condition:
f._waiters.remove(waiter)
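# A minimal usage sketch of as_completed(); fetch and urls are assumed to be
# supplied by the caller:
#
#     with ThreadPoolExecutor(max_workers=4) as pool:
#         futures = {pool.submit(fetch, url): url for url in urls}
#         for completed in as_completed(futures, timeout=60):
#             print(futures[completed], completed.result())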
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
with f._condition:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
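# A minimal usage sketch of wait(); task and pool are assumed:
#
#     done, not_done = wait([pool.submit(task, n) for n in range(10)],
#                           timeout=30, return_when=FIRST_COMPLETED)
#     # at least one finished or cancelled future lands in done (unless the
#     # timeout expired first); unfinished futures remain in not_done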
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._traceback = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
except BaseException:
# Explicitly let all other new-style exceptions through so
# that we can catch all old-style exceptions with a simple
# "except:" clause below.
#
# All old-style exception objects are instances of
# types.InstanceType, but "except types.InstanceType:" does
# not catch old-style exceptions for some reason. Thus, the
# only way to catch all old-style exceptions without catching
# any new-style exceptions is to filter out the new-style
# exceptions, which all derive from BaseException.
raise
except:
# Because of the BaseException clause above, this handler only
# executes for old-style exception objects.
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<%s at %#x state=%s raised %s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<%s at %#x state=%s returned %s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<%s at %#x state=%s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future was cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
if isinstance(self._exception, types.InstanceType):
# The exception is an instance of an old-style class, which
# means type(self._exception) returns types.ClassType instead
# of the exception's actual class type.
exception_type = self._exception.__class__
else:
exception_type = type(self._exception)
raise exception_type, self._exception, self._traceback
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception_info(self, timeout=None):
"""Return a tuple of (exception, traceback) raised by the call that the
future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
return self.exception_info(timeout)[0]
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception_info(self, exception, traceback):
"""Sets the result of the future as being the given exception
and traceback.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._traceback = traceback
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
self.set_exception_info(exception, None)
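# A condensed sketch of the executor-side protocol described above; the real
# implementation of this sequence is _WorkItem.run() in thread.py:
#
#     def run_work_item(future, fn, args, kwargs):
#         if not future.set_running_or_notify_cancel():
#             return  # cancelled before it started; waiters were notified
#         try:
#             future.set_result(fn(*args, **kwargs))
#         except BaseException as exc:
#             future.set_exception(exc)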
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns an iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)]
# Yield must be hidden in closure so that the futures are submitted
# before the first iterator value is required.
def result_iterator():
try:
# reverse to keep finishing order
fs.reverse()
while fs:
# Careful not to keep a reference to the popped future
if timeout is None:
yield fs.pop().result()
else:
yield fs.pop().result(end_time - time.time())
finally:
for future in fs:
future.cancel()
return result_iterator()
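    # A minimal usage sketch of map(); results arrive in argument order even
    # when the calls complete out of order (square and executor are assumed):
    #
    #     for value in executor.map(square, [1, 2, 3], timeout=10):
    #         print(value)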
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
class BrokenExecutor(RuntimeError):
"""
    Raised when an executor has become non-functional after a severe failure.
"""

View file

@ -1,363 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
  _ResultItems in "Result Q"
"""
import atexit
from . import _base
import Queue as queue
import multiprocessing
import threading
import weakref
import sys
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items()) if _threads_queues else ()
for t, q in items:
q.put(None)
for t, q in items:
t.join(sys.maxint)
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be written
to by the worker.
shutdown: A multiprocessing.Event that will be set as a signal to the
worker that it should exit when call_queue is empty.
"""
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(None)
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except:
e = sys.exc_info()[1]
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
process: A list of the multiprocessing.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
"""
nb_shutdown_processes = [0]
def shutdown_one_process():
"""Tell a worker to terminate, which will in turn wake us again"""
call_queue.put(None)
nb_shutdown_processes[0] += 1
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
result_item = result_queue.get(block=True)
if result_item is not None:
work_item = pending_work_items[result_item.work_id]
del pending_work_items[result_item.work_id]
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Delete references to object. See issue16284
del work_item
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if _shutdown or executor is None or executor._shutdown_thread:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
while nb_shutdown_processes[0] < len(processes):
shutdown_one_process()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS
# X.
for p in processes:
p.join()
call_queue.close()
return
del executor
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
import os
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
        # indeterminate limit, assume the limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
self._processes = set()
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(_, q=self._result_queue):
q.put(None)
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_queues[self._queue_management_thread] = self._result_queue
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue))
p.start()
self._processes.add(p)
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._result_queue.put(None)
self._start_queue_management_thread()
self._adjust_process_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join(sys.maxint)
        # To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
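
Tying the pieces above together, a minimal usage sketch written against the py3 stdlib equivalent of this module; any picklable top-level function can serve as the callable:

from concurrent.futures import ProcessPoolExecutor

def cube(n):
    return n ** 3

if __name__ == '__main__':  # workers re-import the main module, so guard the entry point
    with ProcessPoolExecutor(max_workers=2) as pool:
        print(list(pool.map(cube, range(5))))  # -> [0, 1, 8, 27, 64]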

View file

@ -1,207 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
import atexit
from six import PY2
if PY2:
from . import _base
else:
from concurrent.futures import _base
import itertools
import Queue as queue
import threading
import weakref
import sys
try:
from multiprocessing import cpu_count
except ImportError:
# some platforms don't have multiprocessing
def cpu_count():
return None
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items()) if _threads_queues else ()
for t, q in items:
q.put(None)
for t, q in items:
t.join(sys.maxint)
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except:
e, tb = sys.exc_info()[1:]
self.future.set_exception_info(e, tb)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue, initializer, initargs):
if initializer is not None:
try:
initializer(*initargs)
except BaseException:
_base.LOGGER.critical('Exception in initializer:', exc_info=True)
executor = executor_reference()
if executor is not None:
executor._initializer_failed()
return
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
# attempt to increment idle count
executor = executor_reference()
if executor is not None:
executor._idle_semaphore.release()
del executor
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
                # Notify other workers
work_queue.put(None)
return
del executor
except:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class BrokenThreadPool(_base.BrokenExecutor):
"""
Raised when a worker thread in a ThreadPoolExecutor failed initializing.
"""
class ThreadPoolExecutor(_base.Executor):
# Used to assign unique thread names when thread_name_prefix is not supplied.
_counter = itertools.count().next
def __init__(self, max_workers=None, thread_name_prefix='', initializer=None, initargs=()):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
thread_name_prefix: An optional name prefix to give our threads.
"""
if max_workers is None:
# Use this number because ThreadPoolExecutor is often
# used to overlap I/O instead of CPU work.
max_workers = (cpu_count() or 1) * 5
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
self._max_workers = max_workers
self._initializer = initializer
self._initargs = initargs
self._work_queue = queue.Queue()
self._idle_semaphore = threading.Semaphore(0)
self._threads = set()
self._broken = False
self._shutdown = False
self._shutdown_lock = threading.Lock()
self._thread_name_prefix = (thread_name_prefix or
("ThreadPoolExecutor-%d" % self._counter()))
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._broken:
raise BrokenThreadPool(self._broken)
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# if idle threads are available, don't spin new threads
if self._idle_semaphore.acquire(False):
return
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
num_threads = len(self._threads)
if num_threads < self._max_workers:
thread_name = '%s_%d' % (self._thread_name_prefix or self,
num_threads)
t = threading.Thread(name=thread_name, target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue, self._initializer, self._initargs))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def _initializer_failed(self):
with self._shutdown_lock:
self._broken = ('A thread initializer failed, the thread pool '
'is not usable anymore')
# Drain work queue and mark pending futures failed
while True:
try:
work_item = self._work_queue.get_nowait()
except queue.Empty:
break
if work_item is not None:
work_item.future.set_exception(BrokenThreadPool(self._broken))
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join(sys.maxint)
shutdown.__doc__ = _base.Executor.shutdown.__doc__
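
A minimal usage sketch, written against the py3 stdlib equivalent of this module; thread_name_prefix feeds the worker-naming logic in _adjust_thread_count() above:

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=3, thread_name_prefix='Demo') as pool:
    futures = [pool.submit(pow, 2, n) for n in range(5)]
    print([f.result() for f in futures])  # -> [1, 2, 4, 8, 16]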

View file

@ -1,55 +0,0 @@
# coding=utf-8
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
# noinspection PyUnresolvedReferences
import sys
# noinspection PyProtectedMember
from .futures.thread import _base, BrokenThreadPool, ThreadPoolExecutor
from .base import *
class SgWorkItem(GenericWorkItem):
def run(self):
if self.future.set_running_or_notify_cancel():
try:
self._set_thread_name()
result = self.fn(*self.args, **self.kwargs)
except (BaseException, Exception):
e, tb = sys.exc_info()[1:]
self.future.set_exception_info(e, tb)
else:
self.future.set_result(result)
class SgThreadPoolExecutor(ThreadPoolExecutor):
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._broken:
raise BrokenThreadPool(self._broken)
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = SgWorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
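
The subclass remains a drop-in for ThreadPoolExecutor; submit() differs only in queueing an SgWorkItem, whose _set_thread_name() (assumed inherited from GenericWorkItem in base.py) renames the worker thread per task. A hedged usage sketch:

pool = SgThreadPoolExecutor(thread_name_prefix='WEBSERVER', max_workers=10)
future = pool.submit(print, 'hello')  # any callable and arguments
future.result()
pool.shutdown(wait=True)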

View file

@@ -35,8 +35,8 @@ from send2trash import send2trash
 from encodingKludge import SYS_ENCODING
 import requests
 
-from _23 import decode_bytes, filter_list, html_unescape, list_range, \
-    ordered_dict, Popen, scandir, urlparse, urlsplit, urlunparse
+from _23 import decode_bytes, html_unescape, list_range, \
+    Popen, scandir, urlparse, urlsplit, urlunparse
 from six import integer_types, iteritems, iterkeys, itervalues, moves, PY2, string_types, text_type
 
 import zipfile
@@ -810,8 +810,8 @@ def get_url(url,  # type: AnyStr
     response_attr = ('text', 'content')[as_binary]
 
     # selectively mute some errors
-    mute = filter_list(lambda x: kwargs.pop(x, False), [
-        'mute_connect_err', 'mute_read_timeout', 'mute_connect_timeout', 'mute_http_error'])
+    mute = list(filter(lambda x: kwargs.pop(x, False), [
+        'mute_connect_err', 'mute_read_timeout', 'mute_connect_timeout', 'mute_http_error']))
 
     # reuse or instantiate request session
     resp_sess = kwargs.pop('resp_sess', None)
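
The rewrite is behaviour-preserving: py3 filter() returns a lazy iterator, and wrapping it in list() restores the eager list that the removed _23.filter_list helper produced, for example:

list(filter(lambda x: x % 2, [1, 2, 3]))  # -> [1, 3], as filter_list gave on py2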
@@ -1617,12 +1617,12 @@ def ast_eval(value, default=None):
         return default
 
     if 'OrderedDict()' == value:
-        value = ordered_dict()
+        value = dict()
 
     elif 'OrderedDict([(' == value[0:14]:
         try:
             list_of_tuples = ast.literal_eval(value[12:-1])
-            value = ordered_dict()
+            value = dict()
             for cur_tuple in list_of_tuples:
                 value[cur_tuple[0]] = cur_tuple[1]
         except (BaseException, Exception):
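
A hedged sketch of the round trip this branch handles, using ast_eval as defined above; plain dict preserves insertion order on py3.7+, so dropping OrderedDict keeps behaviour:

ast_eval("OrderedDict([('tvdb', 1), ('imdb', 2)])", {})  # -> {'tvdb': 1, 'imdb': 2}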

View file

@@ -8,7 +8,6 @@ import time
 
 from exceptions_helper import ex
 from six import integer_types, iteritems, iterkeys, string_types, text_type
-from _23 import list_items, list_values
 from lib.tvinfo_base.exceptions import *
 from sg_helpers import calc_age, make_path
 
@@ -53,7 +52,7 @@ tv_src_names = {
     TVINFO_IMDB: 'imdb',
     TVINFO_TRAKT: 'trakt',
     TVINFO_TMDB: 'tmdb',
-    TVINFO_TVDB_SLUG : 'tvdb slug',
+    TVINFO_TVDB_SLUG: 'tvdb slug',
     TVINFO_TRAKT_SLUG: 'trakt slug',
 
     TVINFO_SLUG: 'generic slug',
@@ -67,7 +66,7 @@ tv_src_names = {
 log = logging.getLogger('TVInfo')
 log.addHandler(logging.NullHandler())
-TVInfoShowContainer = {}  # type: Dict[ShowContainer]
+TVInfoShowContainer = {}  # type: Dict[str, ShowContainer]
 
 
 class ShowContainer(dict):
@@ -94,7 +93,7 @@ class ShowContainer(dict):
         if acquired_lock:
             try:
                 current_time = time.time()
-                for k, v in list_items(self):
+                for k, v in list(self.items()):
                     if self.max_age < current_time - v[1]:
                         lock_acquired = self[k].lock.acquire(False)
                         if lock_acquired:
@@ -125,7 +124,7 @@ class TVInfoIDs(object):
                  trakt=None,  # type: integer_types
                  rage=None,  # type: integer_types
                  ids=None  # type: Dict[int, integer_types]
-                 ):  # type: (...) -> TVInfoIDs
+                 ):
         ids = ids or {}
         self.tvdb = tvdb or ids.get(TVINFO_TVDB)
         self.tmdb = tmdb or ids.get(TVINFO_TMDB)
@@ -156,7 +155,7 @@ class TVInfoIDs(object):
 
 class TVInfoSocialIDs(object):
     def __init__(self, twitter=None, instagram=None, facebook=None, wikipedia=None, ids=None):
-        # type: (str_int, str_int, str_int, str_int, Dict[int, str_int]) -> TVInfoSocialIDs
+        # type: (str_int, str_int, str_int, str_int, Dict[int, str_int]) -> None
         ids = ids or {}
         self.twitter = twitter or ids.get(TVINFO_TWITTER)
         self.instagram = instagram or ids.get(TVINFO_INSTAGRAM)
@@ -231,7 +230,7 @@ class TVInfoImage(object):
                  lang=None, height=None, width=None, aspect_ratio=None):
         self.img_id = img_id  # type: Optional[integer_types]
         self.image_type = image_type  # type: integer_types
-        self.sizes = sizes  # type: Dict[TVInfoImageSize, AnyStr]
+        self.sizes = sizes  # type: Dict[int, AnyStr]
         self.type_str = type_str  # type: AnyStr
         self.main_image = main_image  # type: bool
         self.rating = rating  # type: Optional[Union[float, integer_types]]
@@ -243,7 +242,7 @@ class TVInfoImage(object):
 
     def __str__(self):
         return '<TVInfoImage %s [%s]>' % (TVInfoImageType.reverse_str.get(self.image_type, 'unknown'),
-                                          ', '.join(TVInfoImageSize.reverse_str.get(s, 'unkown') for s in self.sizes))
+                                          ', '.join(TVInfoImageSize.reverse_str.get(s, 'unknown') for s in self.sizes))
 
     __repr__ = __str__
@@ -409,7 +408,7 @@ class TVInfoShow(dict):
         match, and so on.
         """
         results = []
-        for cur_season in list_values(self):
+        for cur_season in self.values():
             searchresult = cur_season.search(term=term, key=key)
             if 0 != len(searchresult):
                 results.extend(searchresult)
@@ -487,7 +486,7 @@ class TVInfoSeason(dict):
         instances.
         """
         results = []
-        for ep in list_values(self):
+        for ep in self.values():
             searchresult = ep.search(term=term, key=key)
             if None is not searchresult:
                 results.append(searchresult)
@@ -679,7 +678,7 @@ class PersonBase(dict):
                  ids=None,  # type: Dict
                  thumb_url=None,  # type: AnyStr
                  **kwargs  # type: Dict
-                 ):  # type: (...) -> PersonBase
+                 ):
         super(PersonBase, self).__init__(**kwargs)
         self.id = p_id  # type: Optional[integer_types]
         self.name = name  # type: Optional[AnyStr]
@@ -769,7 +768,7 @@ class TVInfoPerson(PersonBase):
                  real_name=None,  # type: AnyStr
                  akas=None,  # type: Set[AnyStr]
                  **kwargs  # type: Dict
-                 ):  # type: (...) -> TVInfoPerson
+                 ):
         super(TVInfoPerson, self).__init__(
             p_id=p_id, name=name, image=image, thumb_url=thumb_url, bio=bio, gender=gender,
             birthdate=birthdate, deathdate=deathdate, country=country, images=images,
@@ -795,7 +794,7 @@ class TVInfoPerson(PersonBase):
 class TVInfoCharacter(PersonBase):
     def __init__(self, person=None, voice=None, plays_self=None, regular=None, show=None, start_year=None,
                  end_year=None, **kwargs):
-        # type: (List[TVInfoPerson], bool, bool, bool, TVInfoShow, int, int, Dict) -> TVInfoCharacter
+        # type: (List[TVInfoPerson], bool, bool, bool, TVInfoShow, int, int, Dict) -> None
         super(TVInfoCharacter, self).__init__(**kwargs)
         self.person = person  # type: List[TVInfoPerson]
         self.voice = voice  # type: Optional[bool]
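
The dropped return annotations such as -> TVInfoIDs were incorrect on __init__, which always returns None; in PEP 484 comment syntax a constructor is written as:

class Example(object):
    def __init__(self, name):
        # type: (str) -> None
        self.name = name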

View file

@ -55,8 +55,8 @@ from browser_ua import get_ua
from configobj import ConfigObj from configobj import ConfigObj
from api_trakt import TraktAPI from api_trakt import TraktAPI
from _23 import b64encodestring, decode_bytes, filter_iter, list_items, map_list, ordered_dict, scandir from _23 import b64encodestring, decode_bytes, scandir
from six import iteritems, PY2, string_types from six import iteritems, string_types
import sg_helpers import sg_helpers
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -1353,10 +1353,10 @@ def init_stage_1(console_logging):
EPISODE_VIEW_MISSED_RANGE = check_setting_int(CFG, 'GUI', 'episode_view_missed_range', 7) EPISODE_VIEW_MISSED_RANGE = check_setting_int(CFG, 'GUI', 'episode_view_missed_range', 7)
HISTORY_LAYOUT = check_setting_str(CFG, 'GUI', 'history_layout', 'detailed') HISTORY_LAYOUT = check_setting_str(CFG, 'GUI', 'history_layout', 'detailed')
BROWSELIST_HIDDEN = map_list( BROWSELIST_HIDDEN = list(map(
lambda y: TVidProdid.glue in y and y or '%s%s%s' % ( lambda y: TVidProdid.glue in y and y or '%s%s%s' % (
(TVINFO_TVDB, TVINFO_IMDB)[bool(helpers.parse_imdb_id(y))], TVidProdid.glue, y), (TVINFO_TVDB, TVINFO_IMDB)[bool(helpers.parse_imdb_id(y))], TVidProdid.glue, y),
[x.strip() for x in check_setting_str(CFG, 'GUI', 'browselist_hidden', '').split('|~|') if x.strip()]) [x.strip() for x in check_setting_str(CFG, 'GUI', 'browselist_hidden', '').split('|~|') if x.strip()]))
BROWSELIST_MRU = sg_helpers.ast_eval(check_setting_str(CFG, 'GUI', 'browselist_prefs', None), {}) BROWSELIST_MRU = sg_helpers.ast_eval(check_setting_str(CFG, 'GUI', 'browselist_prefs', None), {})
BACKUP_DB_PATH = check_setting_str(CFG, 'Backup', 'backup_db_path', '') BACKUP_DB_PATH = check_setting_str(CFG, 'Backup', 'backup_db_path', '')
@ -1450,7 +1450,7 @@ def init_stage_1(console_logging):
setattr(nzb_prov, attr, check_setting_str(CFG, prov_id_uc, attr_check, default)) setattr(nzb_prov, attr, check_setting_str(CFG, prov_id_uc, attr_check, default))
elif isinstance(default, int): elif isinstance(default, int):
setattr(nzb_prov, attr, check_setting_int(CFG, prov_id_uc, attr_check, default)) setattr(nzb_prov, attr, check_setting_int(CFG, prov_id_uc, attr_check, default))
for cur_provider in filter_iter(lambda p: abs(zlib.crc32(decode_bytes(p.name))) + 40000400 in ( for cur_provider in filter(lambda p: abs(zlib.crc32(decode_bytes(p.name))) + 40000400 in (
1449593765, 1597250020, 1524942228, 160758496, 2925374331 1449593765, 1597250020, 1524942228, 160758496, 2925374331
) or (p.url and abs(zlib.crc32(decode_bytes(re.sub(r'[./]', '', p.url[-10:])))) + 40000400 in ( ) or (p.url and abs(zlib.crc32(decode_bytes(re.sub(r'[./]', '', p.url[-10:])))) + 40000400 in (
2417143804,)), providers.sortedProviderList()): 2417143804,)), providers.sortedProviderList()):
@ -1505,24 +1505,6 @@ def init_stage_1(console_logging):
pass pass
logger.sb_log_instance.init_logging(console_logging=console_logging) logger.sb_log_instance.init_logging(console_logging=console_logging)
if PY2:
try:
import _scandir
except ImportError:
_scandir = None
try:
import ctypes
except ImportError:
ctypes = None
if None is not _scandir and None is not ctypes and not getattr(_scandir, 'DirEntry', None):
MODULE_UPDATE_STRING = \
'Your scandir binary module is outdated, using the slow but newer Python module.' \
'<br>Upgrade the binary at a command prompt with' \
' # <span class="boldest">python -m pip install -U scandir</span>' \
'<br>Important: You <span class="boldest">must</span> Shutdown SickGear before upgrading'
showList = [] showList = []
showDict = {} showDict = {}
@ -1865,7 +1847,7 @@ def save_config():
# For passwords you must include the word `password` in the item_name and # For passwords you must include the word `password` in the item_name and
# add `helpers.encrypt(ITEM_NAME, ENCRYPTION_VERSION)` in save_config() # add `helpers.encrypt(ITEM_NAME, ENCRYPTION_VERSION)` in save_config()
new_config['General'] = ordered_dict() new_config['General'] = dict()
s_z = check_setting_int(CFG, 'General', 'stack_size', 0) s_z = check_setting_int(CFG, 'General', 'stack_size', 0)
if s_z: if s_z:
new_config['General']['stack_size'] = s_z new_config['General']['stack_size'] = s_z
@ -1927,7 +1909,8 @@ def save_config():
new_config['General']['flatten_folders_default'] = int(FLATTEN_FOLDERS_DEFAULT) new_config['General']['flatten_folders_default'] = int(FLATTEN_FOLDERS_DEFAULT)
new_config['General']['anime_default'] = int(ANIME_DEFAULT) new_config['General']['anime_default'] = int(ANIME_DEFAULT)
new_config['General']['provider_order'] = ' '.join(PROVIDER_ORDER) new_config['General']['provider_order'] = ' '.join(PROVIDER_ORDER)
new_config['General']['provider_homes'] = '%s' % dict([(pid, v) for pid, v in list_items(PROVIDER_HOMES) if pid in [ new_config['General']['provider_homes'] = '%s' % dict([(pid, v) for pid, v in list(PROVIDER_HOMES.items())
if pid in [
p.get_id() for p in [x for x in providers.sortedProviderList() if GenericProvider.TORRENT == x.providerType]]]) p.get_id() for p in [x for x in providers.sortedProviderList() if GenericProvider.TORRENT == x.providerType]]])
new_config['General']['update_notify'] = int(UPDATE_NOTIFY) new_config['General']['update_notify'] = int(UPDATE_NOTIFY)
new_config['General']['update_auto'] = int(UPDATE_AUTO) new_config['General']['update_auto'] = int(UPDATE_AUTO)
@ -2014,7 +1997,7 @@ def save_config():
new_config['Backup']['backup_db_max_count'] = BACKUP_DB_MAX_COUNT new_config['Backup']['backup_db_max_count'] = BACKUP_DB_MAX_COUNT
default_not_zero = ('enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog', 'use_after_get_data') default_not_zero = ('enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog', 'use_after_get_data')
for src in filter_iter(lambda px: GenericProvider.TORRENT == px.providerType, providers.sortedProviderList()): for src in filter(lambda px: GenericProvider.TORRENT == px.providerType, providers.sortedProviderList()):
src_id = src.get_id() src_id = src.get_id()
src_id_uc = src_id.upper() src_id_uc = src_id.upper()
new_config[src_id_uc] = {} new_config[src_id_uc] = {}
@ -2052,19 +2035,19 @@ def save_config():
del new_config[src_id_uc] del new_config[src_id_uc]
default_not_zero = ('enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog') default_not_zero = ('enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog')
for src in filter_iter(lambda px: GenericProvider.NZB == px.providerType, providers.sortedProviderList()): for src in filter(lambda px: GenericProvider.NZB == px.providerType, providers.sortedProviderList()):
src_id = src.get_id() src_id = src.get_id()
src_id_uc = src.get_id().upper() src_id_uc = src.get_id().upper()
new_config[src_id_uc] = {} new_config[src_id_uc] = {}
if int(src.enabled): if int(src.enabled):
new_config[src_id_uc][src_id] = int(src.enabled) new_config[src_id_uc][src_id] = int(src.enabled)
for attr in filter_iter(lambda _a: None is not getattr(src, _a, None), for attr in filter(lambda _a: None is not getattr(src, _a, None),
('api_key', 'digest', 'username', 'search_mode')): ('api_key', 'digest', 'username', 'search_mode')):
if 'search_mode' != attr or 'eponly' != getattr(src, attr): if 'search_mode' != attr or 'eponly' != getattr(src, attr):
new_config[src_id_uc]['%s_%s' % (src_id, attr)] = getattr(src, attr) new_config[src_id_uc]['%s_%s' % (src_id, attr)] = getattr(src, attr)
for attr in filter_iter(lambda _a: None is not getattr(src, _a, None), ( for attr in filter(lambda _a: None is not getattr(src, _a, None), (
'enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog', 'enable_recentsearch', 'enable_backlog', 'enable_scheduled_backlog',
'scene_only', 'scene_loose', 'scene_loose_active', 'scene_only', 'scene_loose', 'scene_loose_active',
'scene_rej_nuked', 'scene_nuked_active', 'scene_rej_nuked', 'scene_nuked_active',
@ -2280,7 +2263,7 @@ def save_config():
cfg_lc = cfg.lower() cfg_lc = cfg.lower()
cfg_keys += [cfg] cfg_keys += [cfg]
new_config[cfg] = {} new_config[cfg] = {}
for (k, v) in filter_iter(lambda arg: any([arg[1]]) or ( for (k, v) in filter(lambda arg: any([arg[1]]) or (
# allow saving where item value default is non-zero but 0 is a required setting value # allow saving where item value default is non-zero but 0 is a required setting value
cfg_lc in ('kodi', 'xbmc', 'synoindex', 'nzbget', 'torrent', 'telegram') cfg_lc in ('kodi', 'xbmc', 'synoindex', 'nzbget', 'torrent', 'telegram')
and arg[0] in ('always_on', 'priority', 'send_image')) and arg[0] in ('always_on', 'priority', 'send_image'))
@ -2320,7 +2303,7 @@ def save_config():
new_config[notifier]['%s_notify_onsubtitledownload' % notifier.lower()] = int(onsubtitledownload) new_config[notifier]['%s_notify_onsubtitledownload' % notifier.lower()] = int(onsubtitledownload)
# remove empty stanzas # remove empty stanzas
-for k in filter_iter(lambda c: not new_config[c], cfg_keys):
+for k in filter(lambda c: not new_config[c], cfg_keys):
del new_config[k] del new_config[k]
new_config['Newznab'] = {} new_config['Newznab'] = {}
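The hunks above swap the `_23` compatibility helper `filter_iter` for the builtin `filter`, which on py3 already returns a lazy iterator, so a `for` loop over it behaves identically. A minimal sketch of the pattern, using a hypothetical stand-in for the provider objects rather than the project's real classes:

    # sketch only: Provider is a stand-in, not sickgear's provider class
    class Provider(object):
        def __init__(self, name, provider_type):
            self.name, self.providerType = name, provider_type

        def get_id(self):
            return self.name.lower()

    provider_list = [Provider('Alpha', 'torrent'), Provider('Beta', 'nzb')]
    # py2 code needed filter_iter for lazy behaviour; py3's filter is lazy by default
    for src in filter(lambda px: 'torrent' == px.providerType, provider_list):
        print(src.get_id())  # -> alpha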


@ -32,7 +32,7 @@ from tornado import gen
from tornado.escape import utf8 from tornado.escape import utf8
from tornado.web import RequestHandler from tornado.web import RequestHandler
-from _23 import decode_str, filter_iter
+from _23 import decode_str
from six import iteritems from six import iteritems
from sg_futures import SgThreadPoolExecutor from sg_futures import SgThreadPoolExecutor
try: try:
@ -103,7 +103,7 @@ class LegacyBaseHandler(LegacyBase):
def redirect_args(self, new_url, exclude=(None,), **kwargs): def redirect_args(self, new_url, exclude=(None,), **kwargs):
args = '&'.join(['%s=%s' % (k, v) for (k, v) in args = '&'.join(['%s=%s' % (k, v) for (k, v) in
-filter_iter(lambda arg: arg[1] not in exclude, iteritems(kwargs))])
+filter(lambda arg: arg[1] not in exclude, iteritems(kwargs))])
self.redirect('%s%s' % (new_url, ('', '?' + args)[bool(args)]), permanent=True) self.redirect('%s%s' % (new_url, ('', '?' + args)[bool(args)]), permanent=True)
""" deprecated from BaseHandler ------------------------------------------------------------------------------------ """ deprecated from BaseHandler ------------------------------------------------------------------------------------


@ -25,7 +25,7 @@ import sickgear
from ._legacy_classes import LegacySearchResult, LegacyProper from ._legacy_classes import LegacySearchResult, LegacyProper
from .common import Quality from .common import Quality
-from six import integer_types, iteritems, PY2, string_types
+from six import integer_types, iteritems, string_types
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
if False: if False:
@ -359,41 +359,11 @@ class OrderedDefaultdict(OrderedDict):
args = (self.default_factory,) if self.default_factory else () args = (self.default_factory,) if self.default_factory else ()
return self.__class__, args, None, None, iteritems(self) return self.__class__, args, None, None, iteritems(self)
-    if PY2:
-        # backport from python 3
-        def move_to_end(self, key, last=True):
-            """Move an existing element to the end (or beginning if last==False).
-            Raises KeyError if the element does not exist.
-            When last=True, acts like a fast version of self[key]=self.pop(key).
-            """
-            link_prev, link_next, key = link = getattr(self, '_OrderedDict__map')[key]
-            link_prev[1] = link_next
-            link_next[0] = link_prev
-            root = getattr(self, '_OrderedDict__root')
-            if last:
-                last = root[0]
-                link[0] = last
-                link[1] = root
-                last[1] = root[0] = link
-            else:
-                first = root[1]
-                link[0] = root
-                link[1] = first
-                root[1] = first[0] = link
-
-        def first_key(self):
-            return getattr(self, '_OrderedDict__root')[1][2]
-
-        def last_key(self):
-            return getattr(self, '_OrderedDict__root')[0][2]
-    else:
-        def first_key(self):
-            return next(iter(self))
-
-        def last_key(self):
-            return next(reversed(self))
+    def first_key(self):
+        return next(iter(self))
+
+    def last_key(self):
+        return next(reversed(self))
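With the py2 `move_to_end` backport gone, `first_key`/`last_key` lean only on guaranteed py3 behaviour: `OrderedDict` iterates in insertion order and supports `reversed()` (plain `dict` gained the same guarantees in py3.7/3.8). An illustrative sketch:

    from collections import OrderedDict

    d = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    first = next(iter(d))     # 'a' - peek at the first key without copying all keys
    last = next(reversed(d))  # 'c' - reversed() works on OrderedDict in py3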
class ImageUrlList(list): class ImageUrlList(list):
@ -455,61 +425,14 @@ class EnvVar(object):
pass pass
def __getitem__(self, key): def __getitem__(self, key):
-return os.environ(key)
+return os.environ[key]
@staticmethod @staticmethod
def get(key, default=None): def get(key, default=None):
return os.environ.get(key, default) return os.environ.get(key, default)
-if not PY2:
-    sickgear.ENV = EnvVar()
-
-elif 'nt' == os.name:
-    from ctypes import windll, create_unicode_buffer
-
-    # noinspection PyCompatibility
-    class WinEnvVar(EnvVar):
-
-        @staticmethod
-        def get_environment_variable(name):
-            # noinspection PyUnresolvedReferences
-            name = unicode(name)  # ensures string argument is unicode
-            n = windll.kernel32.GetEnvironmentVariableW(name, None, 0)
-            env_value = None
-            if n:
-                buf = create_unicode_buffer(u'\0' * n)
-                windll.kernel32.GetEnvironmentVariableW(name, buf, n)
-                env_value = buf.value
-            return env_value
-
-        def __getitem__(self, key):
-            return self.get_environment_variable(key)
-
-        def get(self, key, default=None):
-            r = self.get_environment_variable(key)
-            return r if None is not r else default
-
-    sickgear.ENV = WinEnvVar()
-
-else:
-    # noinspection PyCompatibility
-    class LinuxEnvVar(EnvVar):
-        # noinspection PyMissingConstructor
-        def __init__(self, environ):
-            self.environ = environ
-
-        def __getitem__(self, key):
-            v = self.environ.get(key)
-            try:
-                return v if not isinstance(v, str) else v.decode(sickgear.SYS_ENCODING)
-            except (UnicodeDecodeError, UnicodeEncodeError):
-                return v
-
-        def get(self, key, default=None):
-            v = self[key]
-            return v if None is not v else default
-
-    sickgear.ENV = LinuxEnvVar(os.environ)
+sickgear.ENV = EnvVar()
# backport from python 3 # backport from python 3
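Besides dropping the Windows and Linux py2 subclasses, this hunk fixes a latent bug: `os.environ(key)` called the mapping as a function, whereas `os.environ[key]` indexes it. The surviving wrapper reduces to a thin shim over `os.environ`; a self-contained sketch:

    import os

    class EnvVar(object):
        def __getitem__(self, key):
            return os.environ[key]  # raises KeyError when unset, like a dict

        @staticmethod
        def get(key, default=None):
            return os.environ.get(key, default)

    env = EnvVar()
    data_dir = env.get('HOME', '/tmp')  # never raises, falls back to the default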


@ -26,7 +26,7 @@ from .. import logger
from ..sgdatetime import timestamp_near from ..sgdatetime import timestamp_near
import sickgear import sickgear
-from _23 import filter_iter, filter_list, map_list, unquote_plus
+from _23 import unquote_plus
from six import string_types from six import string_types
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -96,21 +96,21 @@ class DownloadStationAPI(GenericClient):
id=t['id'], title=t['title'], total_size=t.get('size') or 0, id=t['id'], title=t['title'], total_size=t.get('size') or 0,
added_ts=d.get('create_time'), last_completed_ts=d.get('completed_time'), added_ts=d.get('create_time'), last_completed_ts=d.get('completed_time'),
last_started_ts=d.get('started_time'), seed_elapsed_secs=d.get('seedelapsed'), last_started_ts=d.get('started_time'), seed_elapsed_secs=d.get('seedelapsed'),
-wanted_size=sum(map_list(lambda tf: wanted(tf) and tf.get('size') or 0, f)) or None,
-wanted_down=sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, f)) or None,
+wanted_size=sum(list(map(lambda tf: wanted(tf) and tf.get('size') or 0, f))) or None,
+wanted_down=sum(list(map(lambda tf: wanted(tf) and downloaded(tf) or 0, f))) or None,
tally_down=downloaded(tx), tally_down=downloaded(tx),
tally_up=tx.get('size_uploaded'), tally_up=tx.get('size_uploaded'),
-state='done' if re.search('finish', t['status']) else ('seed', 'down')[any(filter_list(
-    lambda tf: wanted(tf) and (downloaded(tf, -1) < tf.get('size', 0)), f))]
+state='done' if re.search('finish', t['status']) else ('seed', 'down')[any(list(filter(
+    lambda tf: wanted(tf) and (downloaded(tf, -1) < tf.get('size', 0)), f)))]
)) ))
# only available during "download" and "seeding" # only available during "download" and "seeding"
file_list = (lambda t: t.get('additional', {}).get('file', {})) file_list = (lambda t: t.get('additional', {}).get('file', {}))
valid_stat = (lambda ti: not ti.get('error') and isinstance(ti.get('status'), string_types) valid_stat = (lambda ti: not ti.get('error') and isinstance(ti.get('status'), string_types)
-    and sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti))))
-result = map_list(lambda t: base_state(
-    t, t.get('additional', {}).get('detail', {}), t.get('additional', {}).get('transfer', {}), file_list(t)),
-    filter_list(lambda t: t['status'] in ('downloading', 'seeding', 'finished') and valid_stat(t),
-                tasks))
+    and sum(list(map(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti)))))
+result = list(map(lambda t: base_state(
+    t, t.get('additional', {}).get('detail', {}), t.get('additional', {}).get('transfer', {}), file_list(t)),
+    list(filter(lambda t: t['status'] in ('downloading', 'seeding', 'finished') and valid_stat(t),
+                tasks))))
return result return result
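The `list(...)` wrappers kept around `map`/`filter` here are not cosmetic: on py3 both builtins return one-shot iterators, and these results are summed, indexed, and reused, which would silently drain a bare iterator. A short demonstration of the pitfall:

    sizes = map(int, ['1', '2', '3'])  # a lazy iterator, not a list
    total = sum(sizes)                 # 6 - consumes the iterator
    again = sum(sizes)                 # 0 - already exhausted

    sizes = list(map(int, ['1', '2', '3']))
    assert sum(sizes) == sum(sizes) == 6  # a list can be read repeatedly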
@ -133,13 +133,13 @@ class DownloadStationAPI(GenericClient):
t_params=dict(additional='detail,file,transfer'))['data']['tasks'] t_params=dict(additional='detail,file,transfer'))['data']['tasks']
else: else:
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
-tasks = (filter_list(lambda d: d.get('id') == rid, self._testdata), self._testdata)[not rid]
+tasks = (list(filter(lambda d: d.get('id') == rid, self._testdata)), self._testdata)[not rid]
result += tasks and (isinstance(tasks, list) and tasks or (isinstance(tasks, dict) and [tasks])) \ result += tasks and (isinstance(tasks, list) and tasks or (isinstance(tasks, dict) and [tasks])) \
or ([], [{'error': True, 'id': rid}])[err] or ([], [{'error': True, 'id': rid}])[err]
except (BaseException, Exception): except (BaseException, Exception):
if getinfo: if getinfo:
result += [dict(error=True, id=rid)] result += [dict(error=True, id=rid)]
-for t in filter_iter(lambda d: isinstance(d.get('title'), string_types) and d.get('title'), result):
+for t in filter(lambda d: isinstance(d.get('title'), string_types) and d.get('title'), result):
t['title'] = unquote_plus(t.get('title')) t['title'] = unquote_plus(t.get('title'))
return result return result
@ -211,7 +211,7 @@ class DownloadStationAPI(GenericClient):
:return: True if success, Id(s) that could not be acted upon, else Falsy if failure :return: True if success, Id(s) that could not be acted upon, else Falsy if failure
""" """
if isinstance(ids, (string_types, list)): if isinstance(ids, (string_types, list)):
-rids = ids if isinstance(ids, list) else map_list(lambda x: x.strip(), ids.split(','))
+rids = ids if isinstance(ids, list) else list(map(lambda x: x.strip(), ids.split(',')))
result = pause_first and self._pause_torrent(rids) # get items not paused result = pause_first and self._pause_torrent(rids) # get items not paused
result = (isinstance(result, list) and result or []) result = (isinstance(result, list) and result or [])
@ -225,7 +225,7 @@ class DownloadStationAPI(GenericClient):
if isinstance(ids, (string_types, list)): if isinstance(ids, (string_types, list)):
item = dict(fail=[], ignore=[]) item = dict(fail=[], ignore=[])
-for task in filter_iter(filter_func, self._tinf(ids, err=True)):
+for task in filter(filter_func, self._tinf(ids, err=True)):
item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('id')] item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('id')]
# retry items not acted on # retry items not acted on
@ -237,7 +237,7 @@ class DownloadStationAPI(GenericClient):
logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG) logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG)
time.sleep(i) time.sleep(i)
item['fail'] = [] item['fail'] = []
-for task in filter_iter(filter_func, self._tinf(retry_ids, err=True)):
+for task in filter(filter_func, self._tinf(retry_ids, err=True)):
item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('id')] item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('id')]
if not item['fail']: if not item['fail']:
@ -303,7 +303,7 @@ class DownloadStationAPI(GenericClient):
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
if response and response.get('success'): if response and response.get('success'):
for s in (1, 3, 5, 10, 15, 30, 60): for s in (1, 3, 5, 10, 15, 30, 60):
-tasks = filter_list(lambda t: task_stamp <= t['additional']['detail']['create_time'], self._tinf())
+tasks = list(filter(lambda t: task_stamp <= t['additional']['detail']['create_time'], self._tinf()))
try: try:
return str(self._client_has(tasks, uri, files)[0].get('id')) return str(self._client_has(tasks, uri, files)[0].get('id'))
except IndexError: except IndexError:
@ -324,8 +324,8 @@ class DownloadStationAPI(GenericClient):
if uri or files: if uri or files:
u = isinstance(uri, dict) and (uri.get('uri', '') or '').lower() or None u = isinstance(uri, dict) and (uri.get('uri', '') or '').lower() or None
f = isinstance(files, dict) and (files.get('file', [''])[0]).lower() or None f = isinstance(files, dict) and (files.get('file', [''])[0]).lower() or None
-result = filter_list(lambda t: u and t['additional']['detail']['uri'].lower() == u
-                               or f and t['additional']['detail']['uri'].lower() in f, tasks)
+result = list(filter(lambda t: u and t['additional']['detail']['uri'].lower() == u
+                               or f and t['additional']['detail']['uri'].lower() in f, tasks))
return result return result
def _client_request(self, method, t_id=None, t_params=None, files=None): def _client_request(self, method, t_id=None, t_params=None, files=None):
@ -360,7 +360,7 @@ class DownloadStationAPI(GenericClient):
return self._error_task(response) return self._error_task(response)
if None is not t_id and None is t_params and 'create' != method: if None is not t_id and None is t_params and 'create' != method:
-return filter_list(lambda r: r.get('error'), response.get('data', {})) or True
+return list(filter(lambda r: r.get('error'), response.get('data', {}))) or True
return response return response


@ -26,7 +26,7 @@ import sickgear
from requests.exceptions import HTTPError from requests.exceptions import HTTPError
-from _23 import filter_iter, filter_list, map_list, unquote_plus
+from _23 import unquote_plus
from six import string_types from six import string_types
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -58,9 +58,9 @@ class QbittorrentAPI(GenericClient):
id=t['hash'], title=t['name'], total_size=gp.get('total_size') or 0, id=t['hash'], title=t['name'], total_size=gp.get('total_size') or 0,
added_ts=gp.get('addition_date'), last_completed_ts=gp.get('completion_date'), added_ts=gp.get('addition_date'), last_completed_ts=gp.get('completion_date'),
last_started_ts=None, seed_elapsed_secs=gp.get('seeding_time'), last_started_ts=None, seed_elapsed_secs=gp.get('seeding_time'),
-wanted_size=sum(map_list(lambda tf: wanted(tf) and tf.get('size') or 0, f)) or None,
-wanted_down=sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, f)) or None,
-tally_down=sum(map_list(lambda tf: downloaded(tf) or 0, f)) or None,
+wanted_size=sum(list(map(lambda tf: wanted(tf) and tf.get('size') or 0, f))) or None,
+wanted_down=sum(list(map(lambda tf: wanted(tf) and downloaded(tf) or 0, f))) or None,
+tally_down=sum(list(map(lambda tf: downloaded(tf) or 0, f))) or None,
tally_up=gp.get('total_uploaded'), tally_up=gp.get('total_uploaded'),
state='done' if 'pausedUP' == t.get('state') else ('down', 'seed')['up' in t.get('state').lower()] state='done' if 'pausedUP' == t.get('state') else ('down', 'seed')['up' in t.get('state').lower()]
)) ))
@ -68,10 +68,10 @@ class QbittorrentAPI(GenericClient):
('torrents/files', 'query/propertiesFiles/%s' % ti['hash'])[not self.api_ns], ('torrents/files', 'query/propertiesFiles/%s' % ti['hash'])[not self.api_ns],
params=({'hash': ti['hash']}, {})[not self.api_ns], json=True) or {}) params=({'hash': ti['hash']}, {})[not self.api_ns], json=True) or {})
valid_stat = (lambda ti: not self._ignore_state(ti) valid_stat = (lambda ti: not self._ignore_state(ti)
-    and sum(map_list(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti))))
-result = map_list(lambda t: base_state(t, self._tinf(t['hash'])[0], file_list(t)),
-                  filter_list(lambda t: re.search('(?i)queue|stall|(up|down)load|pausedUP', t['state']) and
-                              valid_stat(t), self._tinf(ids, False)))
+    and sum(list(map(lambda tf: wanted(tf) and downloaded(tf) or 0, file_list(ti)))))
+result = list(map(lambda t: base_state(t, self._tinf(t['hash'])[0], file_list(t)),
+                  list(filter(lambda t: re.search('(?i)queue|stall|(up|down)load|pausedUP', t['state']) and
+                              valid_stat(t), self._tinf(ids, False)))))
return result return result
@ -109,8 +109,7 @@ class QbittorrentAPI(GenericClient):
except (BaseException, Exception): except (BaseException, Exception):
if getinfo: if getinfo:
result += [dict(error=True, id=rid)] result += [dict(error=True, id=rid)]
-for t in filter_iter(lambda d: isinstance(d.get('name'), string_types) and d.get('name'),
-                     (result, [])[getinfo]):
+for t in filter(lambda d: isinstance(d.get('name'), string_types) and d.get('name'), (result, [])[getinfo]):
t['name'] = unquote_plus(t.get('name')) t['name'] = unquote_plus(t.get('name'))
return result return result
@ -290,7 +289,7 @@ class QbittorrentAPI(GenericClient):
:return: True if success, Id(s) that could not be acted upon, else Falsy if failure :return: True if success, Id(s) that could not be acted upon, else Falsy if failure
""" """
if isinstance(ids, (string_types, list)): if isinstance(ids, (string_types, list)):
-rids = ids if isinstance(ids, list) else map_list(lambda x: x.strip(), ids.split(','))
+rids = ids if isinstance(ids, list) else list(map(lambda x: x.strip(), ids.split(',')))
result = pause_first and self._pause_torrent(rids) # get items not paused result = pause_first and self._pause_torrent(rids) # get items not paused
result = (isinstance(result, list) and result or []) result = (isinstance(result, list) and result or [])
@ -304,7 +303,7 @@ class QbittorrentAPI(GenericClient):
if isinstance(ids, (string_types, list)): if isinstance(ids, (string_types, list)):
item = dict(fail=[], ignore=[]) item = dict(fail=[], ignore=[])
-for task in filter_iter(filter_func, self._tinf(ids, use_props=False, err=True)):
+for task in filter(filter_func, self._tinf(ids, use_props=False, err=True)):
item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('hash')] item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('hash')]
# retry items that are not acted on # retry items that are not acted on
@ -316,7 +315,7 @@ class QbittorrentAPI(GenericClient):
logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG) logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i), logger.DEBUG)
time.sleep(i) time.sleep(i)
item['fail'] = [] item['fail'] = []
-for task in filter_iter(filter_func, self._tinf(retry_ids, use_props=False, err=True)):
+for task in filter(filter_func, self._tinf(retry_ids, use_props=False, err=True)):
item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('hash')] item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('hash')]
if not item['fail']: if not item['fail']:
@ -378,7 +377,7 @@ class QbittorrentAPI(GenericClient):
if True is response: if True is response:
for s in (1, 3, 5, 10, 15, 30, 60): for s in (1, 3, 5, 10, 15, 30, 60):
-if filter_list(lambda t: task_stamp <= t['addition_date'], self._tinf(data.hash)):
+if list(filter(lambda t: task_stamp <= t['addition_date'], self._tinf(data.hash))):
return data.hash return data.hash
time.sleep(s) time.sleep(s)
return True return True


@ -25,7 +25,6 @@ import uuid
import sickgear import sickgear
-from _23 import map_list
from six import integer_types, iterkeys, string_types from six import integer_types, iterkeys, string_types
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
@ -563,8 +562,8 @@ for (attr_name, qual_val) in [
('SNATCHED', SNATCHED), ('SNATCHED_PROPER', SNATCHED_PROPER), ('SNATCHED_BEST', SNATCHED_BEST), ('SNATCHED', SNATCHED), ('SNATCHED_PROPER', SNATCHED_PROPER), ('SNATCHED_BEST', SNATCHED_BEST),
('DOWNLOADED', DOWNLOADED), ('ARCHIVED', ARCHIVED), ('FAILED', FAILED), ('DOWNLOADED', DOWNLOADED), ('ARCHIVED', ARCHIVED), ('FAILED', FAILED),
]: ]:
-setattr(Quality, attr_name, map_list(lambda qk: Quality.compositeStatus(qual_val, qk),
-                                     iterkeys(Quality.qualityStrings)))
+setattr(Quality, attr_name, list(map(lambda qk: Quality.compositeStatus(qual_val, qk),
+                                     iterkeys(Quality.qualityStrings))))
Quality.SNATCHED_ANY = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST Quality.SNATCHED_ANY = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST
SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], []) SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], [])


@ -23,7 +23,7 @@ import sickgear.providers
from . import db, helpers, logger, naming from . import db, helpers, logger, naming
from lib.api_trakt import TraktAPI from lib.api_trakt import TraktAPI
-from _23 import filter_list, urlsplit, urlunsplit
+from _23 import urlsplit, urlunsplit
from six import string_types from six import string_types
@ -831,7 +831,7 @@ class ConfigMigrator(object):
# Migration v15: Transmithe.net variables # Migration v15: Transmithe.net variables
def _migrate_v15(self): def _migrate_v15(self):
try: try:
-neb = filter_list(lambda p: 'Nebulance' in p.name, sickgear.providers.sortedProviderList())[0]
+neb = list(filter(lambda p: 'Nebulance' in p.name, sickgear.providers.sortedProviderList()))[0]
except (BaseException, Exception): except (BaseException, Exception):
return return
# get the old settings from the file and store them in the new variable names # get the old settings from the file and store them in the new variable names


@ -32,7 +32,7 @@ from .sgdatetime import timestamp_near
from sg_helpers import make_path, compress_file, remove_file_perm, scantree from sg_helpers import make_path, compress_file, remove_file_perm, scantree
-from _23 import filter_iter, filter_list, list_values, scandir
+from _23 import scandir
from six import iterkeys, iteritems, itervalues from six import iterkeys, iteritems, itervalues
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -80,12 +80,12 @@ def mass_upsert_sql(table_name, value_dict, key_dict, sanitise=True):
# sanity: remove k, v pairs in keyDict from valueDict # sanity: remove k, v pairs in keyDict from valueDict
if sanitise: if sanitise:
-value_dict = dict(filter_iter(lambda k: k[0] not in key_dict, iteritems(value_dict)))
+value_dict = dict(filter(lambda k: k[0] not in key_dict, iteritems(value_dict)))
# noinspection SqlResolve # noinspection SqlResolve
cl.append(['UPDATE [%s] SET %s WHERE %s' % cl.append(['UPDATE [%s] SET %s WHERE %s' %
(table_name, ', '.join(gen_params(value_dict)), ' AND '.join(gen_params(key_dict))), (table_name, ', '.join(gen_params(value_dict)), ' AND '.join(gen_params(key_dict))),
-list_values(value_dict) + list_values(key_dict)])
+list(value_dict.values()) + list(key_dict.values())])
# noinspection SqlResolve # noinspection SqlResolve
cl.append(['INSERT INTO [' + table_name + '] (' + cl.append(['INSERT INTO [' + table_name + '] (' +
@ -304,14 +304,14 @@ class DBConnection(object):
query = 'UPDATE [%s] SET %s WHERE %s' % ( query = 'UPDATE [%s] SET %s WHERE %s' % (
table_name, ', '.join(gen_params(value_dict)), ' AND '.join(gen_params(key_dict))) table_name, ', '.join(gen_params(value_dict)), ' AND '.join(gen_params(key_dict)))
-self.action(query, list_values(value_dict) + list_values(key_dict))
+self.action(query, list(value_dict.values()) + list(key_dict.values()))
if self.connection.total_changes == changes_before: if self.connection.total_changes == changes_before:
# noinspection SqlResolve # noinspection SqlResolve
query = 'INSERT INTO [' + table_name + ']' \ query = 'INSERT INTO [' + table_name + ']' \
+ ' (%s)' % ', '.join(itertools.chain(iterkeys(value_dict), iterkeys(key_dict))) \ + ' (%s)' % ', '.join(itertools.chain(iterkeys(value_dict), iterkeys(key_dict))) \
+ ' VALUES (%s)' % ', '.join(['?'] * (len(value_dict) + len(key_dict))) + ' VALUES (%s)' % ', '.join(['?'] * (len(value_dict) + len(key_dict)))
-self.action(query, list_values(value_dict) + list_values(key_dict))
+self.action(query, list(value_dict.values()) + list(key_dict.values()))
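The logic above is the classic two-step upsert: attempt an UPDATE, and if `total_changes` shows no row was touched, INSERT instead. A standalone sqlite3 sketch of the same pattern (table and columns hypothetical):

    import sqlite3

    con = sqlite3.connect(':memory:')
    con.execute('CREATE TABLE shows (indexer_id INTEGER PRIMARY KEY, name TEXT)')

    def upsert(con, indexer_id, name):
        before = con.total_changes
        con.execute('UPDATE shows SET name = ? WHERE indexer_id = ?', [name, indexer_id])
        if con.total_changes == before:  # the UPDATE matched nothing, so INSERT
            con.execute('INSERT INTO shows (indexer_id, name) VALUES (?, ?)', [indexer_id, name])

    upsert(con, 1, 'Show A')   # inserts the row
    upsert(con, 1, 'Show A2')  # updates it in place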
def tableInfo(self, table_name): def tableInfo(self, table_name):
# type: (AnyStr) -> Dict[AnyStr, Dict[AnyStr, AnyStr]] # type: (AnyStr) -> Dict[AnyStr, Dict[AnyStr, AnyStr]]
@ -544,7 +544,7 @@ class SchemaUpgrade(object):
# get old table columns and store the ones we want to keep # get old table columns and store the ones we want to keep
result = self.connection.select('pragma table_info([%s])' % table) result = self.connection.select('pragma table_info([%s])' % table)
columns_list = ([column], column)[isinstance(column, list)] columns_list = ([column], column)[isinstance(column, list)]
-keptColumns = filter_list(lambda col: col['name'] not in columns_list, result)
+keptColumns = list(filter(lambda col: col['name'] not in columns_list, result))
keptColumnsNames = [] keptColumnsNames = []
final = [] final = []
@ -759,9 +759,9 @@ def cleanup_old_db_backups(filename):
d, filename = os.path.split(filename) d, filename = os.path.split(filename)
if not d: if not d:
d = sickgear.DATA_DIR d = sickgear.DATA_DIR
-for f in filter_iter(lambda fn: fn.is_file() and filename in fn.name and
+for f in filter(lambda fn: fn.is_file() and filename in fn.name and
re.search(r'\.db(\.v\d+)?\.r\d+$', fn.name), re.search(r'\.db(\.v\d+)?\.r\d+$', fn.name),
scandir(d)): scandir(d)):
try: try:
os.unlink(f.path) os.unlink(f.path)
except (BaseException, Exception): except (BaseException, Exception):


@ -25,7 +25,6 @@ from .history import dateFormat
from exceptions_helper import EpisodeNotFoundException, ex from exceptions_helper import EpisodeNotFoundException, ex
from _23 import unquote from _23 import unquote
-from six import PY2, text_type
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -83,10 +82,6 @@ def prepare_failed_name(release):
fixed = re.sub(r'[.\-+ ]', '_', fixed) fixed = re.sub(r'[.\-+ ]', '_', fixed)
-    # noinspection PyUnresolvedReferences
-    if PY2 and not isinstance(fixed, unicode):
-        fixed = text_type(fixed, 'utf-8', 'replace')
return fixed return fixed


@ -43,8 +43,9 @@ import requests
import requests.exceptions import requests.exceptions
import subliminal import subliminal
from lxml_etree import etree, is_lxml from lxml_etree import etree, is_lxml
-from _23 import b64decodebytes, b64encodebytes, decode_bytes, decode_str, filter_iter, scandir
+from base64 import decodebytes as b64decodebytes, encodebytes as b64encodebytes
+from _23 import decode_bytes, decode_str, scandir
from six import iteritems, string_types, text_type from six import iteritems, string_types, text_type
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
from six.moves import zip from six.moves import zip
@ -1317,7 +1318,7 @@ def has_anime():
:rtype: bool :rtype: bool
""" """
# noinspection PyTypeChecker # noinspection PyTypeChecker
-return False if not sickgear.showList else any(filter_iter(lambda show: show.is_anime, sickgear.showList))
+return False if not sickgear.showList else any(filter(lambda show: show.is_anime, sickgear.showList))
def cpu_sleep(): def cpu_sleep():
@ -1682,7 +1683,7 @@ def upgrade_new_naming():
(d_entry.path, new_dir_name, repr(e), ex(e)), logger.WARNING) (d_entry.path, new_dir_name, repr(e), ex(e)), logger.WARNING)
if os.path.isdir(new_dir_name): if os.path.isdir(new_dir_name):
try: try:
-f_n = filter_iter(lambda fn: fn.is_file(), scandir(new_dir_name))
+f_n = filter(lambda fn: fn.is_file(), scandir(new_dir_name))
except OSError as e: except OSError as e:
logger.log('Unable to rename %s / %s' % (repr(e), ex(e)), logger.log('Unable to rename %s / %s' % (repr(e), ex(e)),
logger.WARNING) logger.WARNING)


@ -22,8 +22,6 @@ from .common import FAILED, SNATCHED, SNATCHED_PROPER, SUBTITLED, Quality
from .name_parser.parser import NameParser from .name_parser.parser import NameParser
import sickgear import sickgear
-from six import PY2, text_type
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
if False: if False:
from typing import Any, AnyStr from typing import Any, AnyStr
@ -47,9 +45,6 @@ def _log_history_item(action, tvid, prodid, season, episode, quality, resource,
""" """
log_date = datetime.datetime.now().strftime(dateFormat) log_date = datetime.datetime.now().strftime(dateFormat)
-    if PY2 and not isinstance(resource, text_type):
-        resource = text_type(resource, 'utf-8', 'replace')
my_db = db.DBConnection() my_db = db.DBConnection()
my_db.action( my_db.action(
'INSERT INTO history' 'INSERT INTO history'


@ -26,8 +26,7 @@ import sickgear
from lib.dateutil.parser import parse from lib.dateutil.parser import parse
-from _23 import unidecode
-from six import iteritems, moves, string_types, PY2
+from six import iteritems, moves, string_types
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
if False: if False:
@ -178,9 +177,7 @@ def clean_show_name(showname):
:return: :return:
:rtype: AnyStr :rtype: AnyStr
""" """
-    if not PY2:
-        return re.sub(r'[(\s]*(?:19|20)\d\d[)\s]*$', '', showname)
-    return re.sub(r'[(\s]*(?:19|20)\d\d[)\s]*$', '', unidecode(showname))
+    return re.sub(r'[(\s]*(?:19|20)\d\d[)\s]*$', '', showname)
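The surviving branch is a single regex that strips a trailing bracketed or bare year from a show name, with no `unidecode` transliteration needed on py3. For illustration:

    import re

    def clean_show_name(showname):
        return re.sub(r'[(\s]*(?:19|20)\d\d[)\s]*$', '', showname)

    assert 'Battlestar Galactica' == clean_show_name('Battlestar Galactica (2003)')
    assert 'Doctor Who' == clean_show_name('Doctor Who 2005')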
def get_show_name_date(show_obj): def get_show_name_date(show_obj):


@ -20,8 +20,6 @@ from sg_helpers import proxy_setting
import sickgear import sickgear
from lib.tvinfo_base import TVInfoBase from lib.tvinfo_base import TVInfoBase
-from _23 import list_values
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
if False: if False:
from typing import AnyStr, Dict from typing import AnyStr, Dict
@ -83,13 +81,13 @@ class TVInfoAPI(object):
@property @property
def sources(self): def sources(self):
# type: () -> Dict[int, AnyStr] # type: () -> Dict[int, AnyStr]
-return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if not x['mapped_only'] and
+return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if not x['mapped_only'] and
True is not x.get('fallback') and True is not x.get('people_only')]) True is not x.get('fallback') and True is not x.get('people_only')])
@property @property
def search_sources(self): def search_sources(self):
# type: () -> Dict[int, AnyStr] # type: () -> Dict[int, AnyStr]
-return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if not x['mapped_only'] and
+return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if not x['mapped_only'] and
x.get('active') and not x.get('defunct') and True is not x.get('fallback') x.get('active') and not x.get('defunct') and True is not x.get('fallback')
and True is not x.get('people_only')]) and True is not x.get('people_only')])
@ -99,7 +97,7 @@ class TVInfoAPI(object):
""" """
:return: return all indexers including mapped only indexers excluding fallback indexers :return: return all indexers including mapped only indexers excluding fallback indexers
""" """
-return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if True is not x.get('fallback')
+return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if True is not x.get('fallback')
and True is not x.get('people_only')]) and True is not x.get('people_only')])
@property @property
@ -108,9 +106,9 @@ class TVInfoAPI(object):
""" """
:return: return all fallback indexers :return: return all fallback indexers
""" """
-return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if True is x.get('fallback')])
+return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if True is x.get('fallback')])
@property @property
def xem_supported_sources(self): def xem_supported_sources(self):
# type: () -> Dict[int, AnyStr] # type: () -> Dict[int, AnyStr]
-return dict([(int(x['id']), x['name']) for x in list_values(tvinfo_config) if x.get('xem_origin')])
+return dict([(int(x['id']), x['name']) for x in list(tvinfo_config.values()) if x.get('xem_origin')])


@ -19,11 +19,10 @@ __all__ = ['generic', 'helpers', 'kodi', 'mede8er', 'mediabrowser', 'ps3', 'tivo
import sys import sys
from . import kodi, mede8er, mediabrowser, ps3, tivo, wdtv, xbmc, xbmc_12plus from . import kodi, mede8er, mediabrowser, ps3, tivo, wdtv, xbmc, xbmc_12plus
-from _23 import filter_list
def available_generators(): def available_generators():
-return filter_list(lambda x: x not in ('generic', 'helpers'), __all__)
+return list(filter(lambda x: x not in ('generic', 'helpers'), __all__))
def _getMetadataModule(name): def _getMetadataModule(name):

View file

@ -35,7 +35,6 @@ from lib.fanart.core import Request as fanartRequest
import lib.fanart as fanart import lib.fanart as fanart
from lxml_etree import etree from lxml_etree import etree
-from _23 import filter_iter, list_keys
from six import iteritems, itervalues, string_types from six import iteritems, itervalues, string_types
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -874,7 +873,7 @@ class GenericMetadata(object):
tv_id).name + ", not downloading images: " + ex(e), logger.WARNING) tv_id).name + ", not downloading images: " + ex(e), logger.WARNING)
# todo: when tmdb is added as tv source remove the hardcoded TVINFO_TMDB # todo: when tmdb is added as tv source remove the hardcoded TVINFO_TMDB
-for tv_src in list(OrderedDict.fromkeys([show_obj.tvid] + list_keys(sickgear.TVInfoAPI().search_sources) +
+for tv_src in list(OrderedDict.fromkeys([show_obj.tvid] + list(sickgear.TVInfoAPI().search_sources) +
[TVINFO_TMDB])): [TVINFO_TMDB])):
if tv_src != show_obj.tvid and not show_obj.ids.get(tv_src, {}).get('id'): if tv_src != show_obj.tvid and not show_obj.ids.get(tv_src, {}).get('id'):
continue continue
@ -1220,9 +1219,9 @@ class GenericMetadata(object):
resp = request.response() resp = request.response()
itemlist = [] itemlist = []
dedupe = [] dedupe = []
-for art in filter_iter(lambda i: 10 < len(i.get('url', '')) and (lang == i.get('lang', '')[0:2]),
+for art in filter(lambda i: 10 < len(i.get('url', '')) and (lang == i.get('lang', '')[0:2]),
# remove "[0:2]" ... to strictly use only data where "en" is at source # remove "[0:2]" ... to strictly use only data where "en" is at source
resp[types[image_type]]): # type: dict resp[types[image_type]]): # type: dict
try: try:
url = (art['url'], art['url'].replace('/fanart/', '/preview/'))[thumb] url = (art['url'], art['url'].replace('/fanart/', '/preview/'))[thumb]
if url not in dedupe: if url not in dedupe:


@ -29,7 +29,7 @@ import exceptions_helper
from exceptions_helper import ex from exceptions_helper import ex
from lxml_etree import etree from lxml_etree import etree
-from _23 import decode_str, map_iter
+from _23 import decode_str
from six import string_types from six import string_types
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -157,7 +157,7 @@ class KODIMetadata(generic.GenericMetadata):
has_id = False has_id = False
tvdb_id = None tvdb_id = None
-for tvid, slug in map_iter(
+for tvid, slug in map(
lambda _tvid: (_tvid, sickgear.TVInfoAPI(_tvid).config.get('kodi_slug')), lambda _tvid: (_tvid, sickgear.TVInfoAPI(_tvid).config.get('kodi_slug')),
list(sickgear.TVInfoAPI().all_sources)): list(sickgear.TVInfoAPI().all_sources)):
mid = slug and show_obj.ids[tvid].get('id') mid = slug and show_obj.ids[tvid].get('id')


@ -39,8 +39,8 @@ from lib.tvinfo_base.exceptions import *
from ..classes import OrderedDefaultdict from ..classes import OrderedDefaultdict
from .._legacy_classes import LegacyParseResult from .._legacy_classes import LegacyParseResult
-from _23 import decode_str, list_keys, list_range
-from six import iteritems, iterkeys, itervalues, PY2, string_types, text_type
+from _23 import decode_str, list_range
+from six import iteritems, iterkeys, itervalues, string_types, text_type
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
if False: if False:
@ -166,7 +166,7 @@ class NameParser(object):
result.which_regex = [cur_regex_name] result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num result.score = 0 - cur_regex_num
-named_groups = list_keys(match.groupdict())
+named_groups = list(match.groupdict())
if 'series_name' in named_groups: if 'series_name' in named_groups:
result.series_name = match.group('series_name') result.series_name = match.group('series_name')
@ -511,10 +511,7 @@ class NameParser(object):
@staticmethod @staticmethod
def _unicodify(obj, encoding='utf-8'): def _unicodify(obj, encoding='utf-8'):
-        if PY2 and isinstance(obj, string_types):
-            if not isinstance(obj, text_type):
-                obj = text_type(obj, encoding, 'replace')
-        if not PY2 and isinstance(obj, text_type):
+        if isinstance(obj, text_type):
try: try:
return obj.encode('latin1').decode('utf8') return obj.encode('latin1').decode('utf8')
except (BaseException, Exception): except (BaseException, Exception):
@ -751,9 +748,7 @@ class ParseResult(LegacyParseResult):
self.release_group, self.air_date, tuple(self.ab_episode_numbers))) self.release_group, self.air_date, tuple(self.ab_episode_numbers)))
def __str__(self): def __str__(self):
-        if not PY2:
-            return self.__unicode__()
-        return self.__unicode__().encode('utf-8', errors='ignore')
+        return self.__unicode__()
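The py3-only `_unicodify` above keeps one subtle trick: text decoded with the wrong codec (mojibake) can often be repaired by round-tripping through latin-1, because latin-1 maps bytes 0-255 one-to-one. A sketch of why the `encode('latin1').decode('utf8')` dance works:

    good = 'café'
    mangled = good.encode('utf8').decode('latin1')   # 'cafÃ©', classic mojibake
    fixed = mangled.encode('latin1').decode('utf8')  # latin-1 restores the original bytes
    assert fixed == good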
def __unicode__(self): def __unicode__(self):
if None is not self.series_name: if None is not self.series_name:


@ -29,8 +29,7 @@ from lib.dateutil import tz, zoneinfo
from lib.tzlocal import get_localzone from lib.tzlocal import get_localzone
from sg_helpers import remove_file_perm, scantree from sg_helpers import remove_file_perm, scantree
-from six import integer_types, iteritems, string_types, PY2
-from _23 import list_keys
+from six import integer_types, iteritems, string_types
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
if False: if False:
@ -547,7 +546,7 @@ def _load_network_conversions():
# remove deleted records # remove deleted records
if 0 < len(conversions_db): if 0 < len(conversions_db):
-network_name = list_keys(conversions_db)
+network_name = list(conversions_db)
cl.append(['DELETE FROM network_conversions WHERE tvdb_network' cl.append(['DELETE FROM network_conversions WHERE tvdb_network'
' IN (%s)' % ','.join(['?'] * len(network_name)), network_name]) ' IN (%s)' % ','.join(['?'] * len(network_name)), network_name])
@ -632,8 +631,6 @@ def get_episode_time(d, # type: int
if d and None is not ep_time and None is not tzinfo: if d and None is not ep_time and None is not tzinfo:
ep_date = datetime.date.fromordinal(helpers.try_int(d)) ep_date = datetime.date.fromordinal(helpers.try_int(d))
-        if PY2:
-            return datetime.datetime.combine(ep_date, ep_time).replace(tzinfo=tzinfo)
return datetime.datetime.combine(ep_date, ep_time, tzinfo) return datetime.datetime.combine(ep_date, ep_time, tzinfo)
return parse_date_time(d, t, tzinfo) return parse_date_time(d, t, tzinfo)
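py3's `datetime.combine` accepts `tzinfo` as a third positional argument, which the deleted py2 branch had to emulate with `.replace(tzinfo=...)`. Both spellings yield the same aware datetime; for illustration:

    import datetime
    from datetime import timezone

    d, t = datetime.date(2023, 2, 11), datetime.time(18, 0)
    aware = datetime.datetime.combine(d, t, timezone.utc)                  # py3 form
    legacy = datetime.datetime.combine(d, t).replace(tzinfo=timezone.utc)  # py2-era form
    assert aware == legacy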


@ -25,8 +25,6 @@ from . import emby, kodi, plex, xbmc, \
import sickgear import sickgear
-from _23 import filter_iter, list_values
class NotifierFactory(object): class NotifierFactory(object):
@ -68,32 +66,27 @@ class NotifierFactory(object):
:return: ID String :return: ID String
:rtype: String :rtype: String
""" """
-for n in filter_iter(lambda v: v.is_enabled(),
-                     list_values(self.notifiers)):
+for n in filter(lambda v: v.is_enabled(), list(self.notifiers.values())):
yield n.id() yield n.id()
@property @property
def enabled_onsnatch(self): def enabled_onsnatch(self):
-for n in filter_iter(lambda v: v.is_enabled() and v.is_enabled_onsnatch(),
-                     list_values(self.notifiers)):
+for n in filter(lambda v: v.is_enabled() and v.is_enabled_onsnatch(), list(self.notifiers.values())):
yield n.id() yield n.id()
@property @property
def enabled_ondownload(self): def enabled_ondownload(self):
-for n in filter_iter(lambda v: v.is_enabled() and v.is_enabled_ondownload(),
-                     list_values(self.notifiers)):
+for n in filter(lambda v: v.is_enabled() and v.is_enabled_ondownload(), list(self.notifiers.values())):
yield n.id() yield n.id()
@property @property
def enabled_onsubtitledownload(self): def enabled_onsubtitledownload(self):
-for n in filter_iter(lambda v: v.is_enabled() and v.is_enabled_onsubtitledownload(),
-                     list_values(self.notifiers)):
+for n in filter(lambda v: v.is_enabled() and v.is_enabled_onsubtitledownload(), list(self.notifiers.values())):
yield n.id() yield n.id()
@property @property
def enabled_library(self): def enabled_library(self):
-for n in filter_iter(lambda v: v.is_enabled() and v.is_enabled_library(),
-                     list_values(self.notifiers)):
+for n in filter(lambda v: v.is_enabled() and v.is_enabled_library(), list(self.notifiers.values())):
yield n.id() yield n.id()
def get(self, nid): def get(self, nid):


@ -21,7 +21,7 @@ from .generic import Notifier
from json_helper import json_loads from json_helper import json_loads
import sickgear import sickgear
-from _23 import decode_bytes, decode_str, map_list
+from _23 import decode_bytes, decode_str
class EmbyNotifier(Notifier): class EmbyNotifier(Notifier):
@ -50,7 +50,7 @@ class EmbyNotifier(Notifier):
timeout=20, hooks=dict(response=self._cb_response), json=True) timeout=20, hooks=dict(response=self._cb_response), json=True)
return self.response and self.response.get('ok') and 200 == self.response.get('status_code') and \ return self.response and self.response.get('ok') and 200 == self.response.get('status_code') and \
-version <= map_list(lambda x: int(x), (response and response.get('Version') or '0.0.0.0').split('.'))
+version <= list(map(lambda x: int(x), (response and response.get('Version') or '0.0.0.0').split('.')))
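The version gate works because Python compares lists element-wise, so mapping the dotted string to ints gives a numerically correct check, where a plain string comparison would rank '4.9' above '4.10'. For illustration:

    assert [4, 9] <= list(map(int, '4.10'.split('.')))  # 9 <= 10, numerically right
    assert not ('4.9' <= '4.10')                        # string compare gets it wrong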
def update_library(self, show_obj=None, **kwargs): def update_library(self, show_obj=None, **kwargs):
""" Update library function """ Update library function


@ -20,8 +20,8 @@ from .generic import Notifier
import sickgear import sickgear
from exceptions_helper import ex from exceptions_helper import ex
-from _23 import b64encodestring, decode_str, etree, filter_iter, list_values, unquote_plus, urlencode
-from six import iteritems, text_type, PY2
+from _23 import b64encodestring, decode_str, etree, unquote_plus, urlencode
+from six import iteritems
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
from six.moves import urllib from six.moves import urllib
@ -49,8 +49,7 @@ class PLEXNotifier(Notifier):
return False return False
for key in command: for key in command:
-            if not PY2 or type(command[key]) == text_type:
-                command[key] = command[key].encode('utf-8')
+            command[key] = command[key].encode('utf-8')
enc_command = urlencode(command) enc_command = urlencode(command)
self._log_debug(u'Encoded API command: ' + enc_command) self._log_debug(u'Encoded API command: ' + enc_command)
@ -203,7 +202,7 @@ class PLEXNotifier(Notifier):
hosts_failed.append(cur_host) hosts_failed.append(cur_host)
continue continue
-for section in filter_iter(lambda x: 'show' == x.attrib['type'], sections):
+for section in filter(lambda x: 'show' == x.attrib['type'], sections):
if str(section.attrib['key']) in hosts_all: if str(section.attrib['key']) in hosts_all:
continue continue
keyed_host = [(str(section.attrib['key']), cur_host)] keyed_host = [(str(section.attrib['key']), cur_host)]
@ -247,18 +246,14 @@ class PLEXNotifier(Notifier):
return '' return ''
 hosts = [
-    host.replace('http://', '') for host in filter_iter(lambda x: x.startswith('http:'),
-                                                        list_values(hosts_all))]
+    host.replace('http://', '') for host in filter(lambda x: x.startswith('http:'), list(hosts_all.values()))]
 secured = [
-    host.replace('https://', '') for host in filter_iter(lambda x: x.startswith('https:'),
-                                                         list_values(hosts_all))]
+    host.replace('https://', '') for host in filter(lambda x: x.startswith('https:'), list(hosts_all.values()))]
 failed = ', '.join([
-    host.replace('http://', '') for host in filter_iter(lambda x: x.startswith('http:'),
-                                                        hosts_failed)])
-failed_secured = ', '.join(filter_iter(
+    host.replace('http://', '') for host in filter(lambda x: x.startswith('http:'), hosts_failed)])
+failed_secured = ', '.join(filter(
     lambda x: x not in hosts,
-    [host.replace('https://', '') for host in filter_iter(lambda x: x.startswith('https:'),
-                                                          hosts_failed)]))
+    [host.replace('https://', '') for host in filter(lambda x: x.startswith('https:'), hosts_failed)]))
return '<br>' + '<br>'.join([result for result in [ return '<br>' + '<br>'.join([result for result in [
('', 'Fail: username/password when fetching credentials from plex.tv')[False is token_arg], ('', 'Fail: username/password when fetching credentials from plex.tv')[False is token_arg],


@ -22,7 +22,6 @@ import sickgear
from lib.api_trakt import TraktAPI, exceptions from lib.api_trakt import TraktAPI, exceptions
from exceptions_helper import ConnectionSkipException from exceptions_helper import ConnectionSkipException
-from _23 import list_keys
from six import iteritems from six import iteritems
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -38,7 +37,7 @@ class TraktNotifier(BaseNotifier):
def is_enabled_library(cls): def is_enabled_library(cls):
if sickgear.TRAKT_ACCOUNTS: if sickgear.TRAKT_ACCOUNTS:
for tid, locations in iteritems(sickgear.TRAKT_UPDATE_COLLECTION): for tid, locations in iteritems(sickgear.TRAKT_UPDATE_COLLECTION):
-if tid in list_keys(sickgear.TRAKT_ACCOUNTS):
+if tid in list(sickgear.TRAKT_ACCOUNTS):
return True return True
return False return False
@ -89,7 +88,7 @@ class TraktNotifier(BaseNotifier):
data['shows'][0]['seasons'][0]['episodes'].append({'number': cur_ep_obj.episode}) data['shows'][0]['seasons'][0]['episodes'].append({'number': cur_ep_obj.episode})
for tid, locations in iteritems(sickgear.TRAKT_UPDATE_COLLECTION): for tid, locations in iteritems(sickgear.TRAKT_UPDATE_COLLECTION):
-if tid not in list_keys(sickgear.TRAKT_ACCOUNTS):
+if tid not in list(sickgear.TRAKT_ACCOUNTS):
continue continue
for loc in locations: for loc in locations:
if not ep_obj.location.startswith('%s%s' % (loc.rstrip(os.path.sep), os.path.sep)): if not ep_obj.location.startswith('%s%s' % (loc.rstrip(os.path.sep), os.path.sep)):


@ -23,7 +23,6 @@ from exceptions_helper import ex
from json_helper import json_dumps, json_load from json_helper import json_dumps, json_load
from _23 import b64encodestring, decode_str, etree, quote, unquote, unquote_plus, urlencode from _23 import b64encodestring, decode_str, etree, quote, unquote, unquote_plus, urlencode
-from six import PY2, text_type
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
from six.moves import urllib from six.moves import urllib
@ -150,8 +149,7 @@ class XBMCNotifier(Notifier):
password = self._choose(password, sickgear.XBMC_PASSWORD) password = self._choose(password, sickgear.XBMC_PASSWORD)
for key in command: for key in command:
-            if not PY2 or type(command[key]) == text_type:
-                command[key] = command[key].encode('utf-8')
+            command[key] = command[key].encode('utf-8')
enc_command = urlencode(command) enc_command = urlencode(command)
self._log_debug(u'Encoded API command: ' + enc_command) self._log_debug(u'Encoded API command: ' + enc_command)


@ -10,8 +10,7 @@ import re
from json_helper import json_loads from json_helper import json_loads
from sg_helpers import cmdline_runner, is_virtualenv from sg_helpers import cmdline_runner, is_virtualenv
-from _23 import filter_list, ordered_dict
-from six import iteritems, PY2
+from six import iteritems
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
if False: if False:
@ -51,10 +50,6 @@ def run_pip(pip_cmd, suppress_stderr=False):
pip_cmd += ['--progress-bar', 'off'] pip_cmd += ['--progress-bar', 'off']
new_pip_arg = ['--no-python-version-warning'] new_pip_arg = ['--no-python-version-warning']
-    if PY2:
-        pip_version, _, _ = _get_pip_version()
-        if pip_version and 20 > int(pip_version.split('.')[0]):
-            new_pip_arg = []
return cmdline_runner( return cmdline_runner(
[sys.executable, '-m', 'pip'] + new_pip_arg + ['--disable-pip-version-check'] + pip_cmd, [sys.executable, '-m', 'pip'] + new_pip_arg + ['--disable-pip-version-check'] + pip_cmd,
@ -72,7 +67,7 @@ def initial_requirements():
from Cheetah import VersionTuple from Cheetah import VersionTuple
is_cheetah2 = (3, 0, 0) > VersionTuple[0:3] is_cheetah2 = (3, 0, 0) > VersionTuple[0:3]
-is_cheetah3py3 = not PY2 and (3, 3, 0) > VersionTuple[0:3]
+is_cheetah3py3 = (3, 3, 0) > VersionTuple[0:3]
if not (is_cheetah2 or is_cheetah3py3): if not (is_cheetah2 or is_cheetah3py3):
return return
@ -158,13 +153,10 @@ def check_pip_env():
_, _, installed, failed_names = _check_pip_env() _, _, installed, failed_names = _check_pip_env()
-py2_last = 'final py2 release'
boost = 'performance boost' boost = 'performance boost'
extra_info = dict({'Cheetah3': 'filled requirement', 'CT3': 'filled requirement', extra_info = dict({'Cheetah3': 'filled requirement', 'CT3': 'filled requirement',
'lxml': boost, 'python-Levenshtein': boost}) 'lxml': boost, 'python-Levenshtein': boost})
-extra_info.update((dict(cryptography=py2_last, pip=py2_last, regex=py2_last,
-                        scandir=boost, setuptools=py2_last),
-                   dict(regex=boost))[not PY2])
+extra_info.update(dict(regex=boost))
return installed, extra_info, failed_names return installed, extra_info, failed_names
@ -256,9 +248,9 @@ def _check_pip_env(pip_outdated=False, reset_fails=False):
names_outdated = dict({cur_item.get('name'): {k: cur_item.get(k) for k in ('version', 'latest_version', names_outdated = dict({cur_item.get('name'): {k: cur_item.get(k) for k in ('version', 'latest_version',
'latest_filetype')} 'latest_filetype')}
for cur_item in json_loads(output)}) for cur_item in json_loads(output)})
-to_update = set(filter_list(
-    lambda name: name in specifiers and names_outdated[name]['latest_version'] in specifiers[name],
-    set(names_reco).intersection(set(names_outdated))))
+to_update = set(list(filter(
+    lambda name: name in specifiers and names_outdated[name]['latest_version'] in specifiers[name],
+    set(names_reco).intersection(set(names_outdated)))))
# check whether to ignore direct reference specification updates if not dev mode # check whether to ignore direct reference specification updates if not dev mode
if not int(os.environ.get('CHK_URL_SPECIFIERS', 0)): if not int(os.environ.get('CHK_URL_SPECIFIERS', 0)):
@ -272,7 +264,7 @@ def _check_pip_env(pip_outdated=False, reset_fails=False):
except (BaseException, Exception): except (BaseException, Exception):
pass pass
-updates_todo = ordered_dict()
+updates_todo = dict()
todo = to_install.union(to_update, requirement_update) todo = to_install.union(to_update, requirement_update)
for cur_name in [cur_n for cur_n in names_reco if cur_n in todo]: for cur_name in [cur_n for cur_n in names_reco if cur_n in todo]:
updates_todo[cur_name] = dict({ updates_todo[cur_name] = dict({
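The `updates_todo = dict()` change above is safe because py3.7 made insertion-order preservation part of the language specification, so the later per-name inserts keep their order without an `ordered_dict` shim. A short demonstration of the guarantee:

    d = dict()
    for name in ('Cheetah3', 'lxml', 'regex'):
        d[name] = {}
    assert ['Cheetah3', 'lxml', 'regex'] == list(d)  # insertion order is kept on py3.7+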


@ -33,7 +33,7 @@ from .indexers.indexer_config import TVINFO_TVDB
from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from _23 import decode_str from _23 import decode_str
-from six import iteritems, PY2, string_types
+from six import iteritems, string_types
from sg_helpers import long_path, cmdline_runner from sg_helpers import long_path, cmdline_runner
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -824,12 +824,7 @@ class PostProcessor(object):
script_cmd[0] = os.path.abspath(script_cmd[0]) script_cmd[0] = os.path.abspath(script_cmd[0])
self._log(u'Absolute path to script: ' + script_cmd[0], logger.DEBUG) self._log(u'Absolute path to script: ' + script_cmd[0], logger.DEBUG)
-        if PY2:
-            script_cmd += [ep_obj.location.encode(sickgear.SYS_ENCODING),
-                           self.file_path.encode(sickgear.SYS_ENCODING)
-                           ]
-        else:
-            script_cmd += [ep_obj.location, self.file_path]
+        script_cmd += [ep_obj.location, self.file_path]
script_cmd += ([], [str(ep_obj.show_obj.tvid)])[new_call] + [ script_cmd += ([], [str(ep_obj.show_obj.tvid)])[new_call] + [
str(ep_obj.show_obj.prodid), str(ep_obj.show_obj.prodid),
@ -1174,9 +1169,8 @@ class PostProcessor(object):
keepalive = keepalive_stop = None keepalive = keepalive_stop = None
if self.webhandler: if self.webhandler:
def keep_alive(webh, stop_event): def keep_alive(webh, stop_event):
-                if not PY2:
-                    import asyncio
-                    asyncio.set_event_loop(asyncio.new_event_loop())
+                import asyncio
+                asyncio.set_event_loop(asyncio.new_event_loop())
while not stop_event.is_set(): while not stop_event.is_set():
stop_event.wait(60) stop_event.wait(60)
webh('.') webh('.')


@ -35,8 +35,7 @@ from .history import reset_status
from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from .sgdatetime import timestamp_near from .sgdatetime import timestamp_near
-from _23 import filter_list, filter_iter, list_values, map_iter
-from six import iteritems, iterkeys, string_types, PY2, text_type
+from six import iteritems, iterkeys, string_types, text_type
from sg_helpers import long_path, scantree from sg_helpers import long_path, scantree
import lib.rarfile.rarfile as rarfile import lib.rarfile.rarfile as rarfile
@ -281,7 +280,7 @@ class ProcessTVShow(object):
build_path = (lambda old_path: '%s%s' % (helpers.real_path(old_path).rstrip(os.path.sep), os.path.sep)) build_path = (lambda old_path: '%s%s' % (helpers.real_path(old_path).rstrip(os.path.sep), os.path.sep))
process_path = build_path(path) process_path = build_path(path)
-for parent in map_iter(lambda p: build_path(p), sickgear.ROOT_DIRS.split('|')[1:]):
+for parent in map(lambda p: build_path(p), sickgear.ROOT_DIRS.split('|')[1:]):
if process_path.startswith(parent): if process_path.startswith(parent):
return parent.rstrip(os.path.sep) return parent.rstrip(os.path.sep)
@ -352,7 +351,7 @@ class ProcessTVShow(object):
path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type) path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type)
-if sickgear.POSTPONE_IF_SYNC_FILES and any(filter_iter(helpers.is_sync_file, files)):
+if sickgear.POSTPONE_IF_SYNC_FILES and any(filter(helpers.is_sync_file, files)):
self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR)
return self.result return self.result
@ -367,7 +366,7 @@ class ProcessTVShow(object):
work_files += [joined] work_files += [joined]
rar_files, rarfile_history = self.unused_archives( rar_files, rarfile_history = self.unused_archives(
-path, filter_list(helpers.is_first_rar_volume, files), pp_type, process_method)
+path, list(filter(helpers.is_first_rar_volume, files)), pp_type, process_method)
rar_content = self._unrar(path, rar_files, force) rar_content = self._unrar(path, rar_files, force)
if self.fail_detected: if self.fail_detected:
self._process_failed(dir_name, nzb_name, show_obj=show_obj) self._process_failed(dir_name, nzb_name, show_obj=show_obj)
@ -376,8 +375,8 @@ class ProcessTVShow(object):
rar_content = [x for x in rar_content if not helpers.is_link(os.path.join(path, x))] rar_content = [x for x in rar_content if not helpers.is_link(os.path.join(path, x))]
path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type) path, dirs, files = self._get_path_dir_files(dir_name, nzb_name, pp_type)
files = [x for x in files if not helpers.is_link(os.path.join(path, x))] files = [x for x in files if not helpers.is_link(os.path.join(path, x))]
-video_files = filter_list(helpers.has_media_ext, files)
-video_in_rar = filter_list(helpers.has_media_ext, rar_content)
+video_files = list(filter(helpers.has_media_ext, files))
+video_in_rar = list(filter(helpers.has_media_ext, rar_content))
work_files += [os.path.join(path, item) for item in rar_content] work_files += [os.path.join(path, item) for item in rar_content]
if 0 < len(files): if 0 < len(files):
@ -438,7 +437,7 @@ class ProcessTVShow(object):
for walk_path, walk_dir, files in os.walk(os.path.join(path, directory), topdown=False): for walk_path, walk_dir, files in os.walk(os.path.join(path, directory), topdown=False):
-if sickgear.POSTPONE_IF_SYNC_FILES and any(filter_iter(helpers.is_sync_file, files)):
+if sickgear.POSTPONE_IF_SYNC_FILES and any(filter(helpers.is_sync_file, files)):
self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR) self._log_helper(u'Found temporary sync files, skipping post process', logger.ERROR)
return self.result return self.result
@ -452,7 +451,7 @@ class ProcessTVShow(object):
files = [x for x in files if not helpers.is_link(os.path.join(walk_path, x))] files = [x for x in files if not helpers.is_link(os.path.join(walk_path, x))]
rar_files, rarfile_history = self.unused_archives( rar_files, rarfile_history = self.unused_archives(
-walk_path, filter_list(helpers.is_first_rar_volume, files), pp_type, process_method,
+walk_path, list(filter(helpers.is_first_rar_volume, files)), pp_type, process_method,
rarfile_history) rarfile_history)
rar_content = self._unrar(walk_path, rar_files, force) rar_content = self._unrar(walk_path, rar_files, force)
work_files += [os.path.join(walk_path, item) for item in rar_content] work_files += [os.path.join(walk_path, item) for item in rar_content]
@ -461,8 +460,8 @@ class ProcessTVShow(object):
continue continue
rar_content = [x for x in rar_content if not helpers.is_link(os.path.join(walk_path, x))] rar_content = [x for x in rar_content if not helpers.is_link(os.path.join(walk_path, x))]
files = list(set(files + rar_content)) files = list(set(files + rar_content))
video_files = filter_list(helpers.has_media_ext, files) video_files = list(filter(helpers.has_media_ext, files))
video_in_rar = filter_list(helpers.has_media_ext, rar_content) video_in_rar = list(filter(helpers.has_media_ext, rar_content))
notwanted_files = [x for x in files if x not in video_files] notwanted_files = [x for x in files if x not in video_files]
# Don't Link media when the media is extracted from a rar in the same path # Don't Link media when the media is extracted from a rar in the same path
@ -640,7 +639,7 @@ class ProcessTVShow(object):
all_dirs += process_dir all_dirs += process_dir
all_files += fileList all_files += fileList
video_files = filter_list(helpers.has_media_ext, all_files) video_files = list(filter(helpers.has_media_ext, all_files))
all_dirs.append(dir_name) all_dirs.append(dir_name)
# check if the directory have at least one tv video file # check if the directory have at least one tv video file
@ -660,7 +659,7 @@ class ProcessTVShow(object):
if sickgear.UNPACK and process_path and all_files: if sickgear.UNPACK and process_path and all_files:
# Search for packed release # Search for packed release
packed_files = filter_list(helpers.is_first_rar_volume, all_files) packed_files = list(filter(helpers.is_first_rar_volume, all_files))
for packed in packed_files: for packed in packed_files:
try: try:
@ -719,9 +718,8 @@ class ProcessTVShow(object):
rar_content = [os.path.normpath(x.filename) for x in rar_handle.infolist() if not x.is_dir()] rar_content = [os.path.normpath(x.filename) for x in rar_handle.infolist() if not x.is_dir()]
renamed = self.cleanup_names(path, rar_content) renamed = self.cleanup_names(path, rar_content)
cur_unpacked = rar_content if not renamed else \ cur_unpacked = rar_content if not renamed else \
(list(set(rar_content) - set(iterkeys(renamed))) + list_values(renamed)) (list(set(rar_content) - set(iterkeys(renamed))) + list(renamed.values()))
self._log_helper(u'Unpacked content: [u\'%s\']' % '\', u\''.join(map_iter(text_type, self._log_helper(u'Unpacked content: [u\'%s\']' % '\', u\''.join(map(text_type, cur_unpacked)))
cur_unpacked)))
unpacked_files += cur_unpacked unpacked_files += cur_unpacked
except (rarfile.PasswordRequired, rarfile.RarWrongPassword): except (rarfile.PasswordRequired, rarfile.RarWrongPassword):
self._log_helper(u'Failed to unpack archive PasswordRequired: %s' % archive, logger.ERROR) self._log_helper(u'Failed to unpack archive PasswordRequired: %s' % archive, logger.ERROR)
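For the bookkeeping above, list(renamed.values()) is the direct py3 spelling of the old list_values(renamed) helper; a sketch with made-up names (iterating a dict yields its keys, so set(renamed) matches the iterkeys() usage):

    rar_content = ['a.mkv', 'b.mkv', 'c.nfo']
    renamed = {'b.mkv': 'b.clean.mkv'}  # old name -> new name
    # keep everything that was not renamed, plus the new names
    cur_unpacked = list(set(rar_content) - set(renamed)) + list(renamed.values())
    assert sorted(cur_unpacked) == ['a.mkv', 'b.clean.mkv', 'c.nfo']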
@ -928,10 +926,6 @@ class ProcessTVShow(object):
if force or not self.any_vid_processed: if force or not self.any_vid_processed:
return False return False
# Needed for accessing DB with a unicode dir_name
if PY2 and not isinstance(dir_name, text_type):
dir_name = text_type(dir_name, 'utf_8')
parse_result = None parse_result = None
try: try:
parse_result = NameParser(convert=True).parse(videofile, cache_result=False) parse_result = NameParser(convert=True).parse(videofile, cache_result=False)
@ -974,8 +968,6 @@ class ProcessTVShow(object):
else: else:
# This is needed for video whose name differ from dir_name # This is needed for video whose name differ from dir_name
if PY2 and not isinstance(videofile, text_type):
videofile = text_type(videofile, 'utf_8')
sql_result = my_db.select( sql_result = my_db.select(
'SELECT * FROM tv_episodes WHERE release_name = ?', [videofile.rpartition('.')[0]]) 'SELECT * FROM tv_episodes WHERE release_name = ?', [videofile.rpartition('.')[0]])
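The branches deleted above were only needed because py2 kept separate str and unicode types; on py3 every str is already unicode, so the coercion was a no-op. The invariant the removal relies on:

    from six import text_type
    assert text_type is str          # six aliases text_type to str on py3
    dir_name = 'Tüv.Show.S01'        # already unicode, no re-decode needed
    assert isinstance(dir_name, text_type)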

View file

@ -32,7 +32,7 @@ from .history import dateFormat
from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from .sgdatetime import timestamp_near from .sgdatetime import timestamp_near
from _23 import filter_iter, filter_list, list_values, map_consume, map_list from _23 import map_consume
from six import string_types from six import string_types
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -251,9 +251,9 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime
# filter provider list for: # filter provider list for:
# 1. from recent search: recent search enabled providers # 1. from recent search: recent search enabled providers
# 2. native proper search: active search enabled providers # 2. native proper search: active search enabled providers
provider_list = filter_list( provider_list = list(filter(
lambda p: p.is_active() and (p.enable_recentsearch, p.enable_backlog)[None is proper_dict], lambda p: p.is_active() and (p.enable_recentsearch, p.enable_backlog)[None is proper_dict],
sickgear.providers.sortedProviderList()) sickgear.providers.sortedProviderList()))
search_threads = [] search_threads = []
if None is proper_dict: if None is proper_dict:
@ -487,7 +487,7 @@ def _get_proper_list(aired_since_shows, # type: datetime.datetime
cur_provider.log_result('Propers', len(propers), '%s' % cur_provider.name) cur_provider.log_result('Propers', len(propers), '%s' % cur_provider.name)
return list_values(propers) return list(propers.values())
def _download_propers(proper_list): def _download_propers(proper_list):
@ -507,24 +507,24 @@ def _download_propers(proper_list):
# get verified list; sort the list of unique Propers for highest proper_level, newest first # get verified list; sort the list of unique Propers for highest proper_level, newest first
for cur_proper in sorted( for cur_proper in sorted(
filter_iter(lambda p: p not in consumed_proper, filter(lambda p: p not in consumed_proper,
# allows Proper to fail or be rejected and another to be tried (with a different name) # allows Proper to fail or be rejected and another to be tried (with a different name)
filter_iter(lambda p: _epid(p) not in downloaded_epid, proper_list)), filter(lambda p: _epid(p) not in downloaded_epid, proper_list)),
key=operator.attrgetter('properlevel', 'date'), reverse=True): # type: Proper key=operator.attrgetter('properlevel', 'date'), reverse=True): # type: Proper
epid = _epid(cur_proper) epid = _epid(cur_proper)
# if the show is in our list and there hasn't been a Proper already added for that particular episode # if the show is in our list and there hasn't been a Proper already added for that particular episode
# then add it to our list of Propers # then add it to our list of Propers
if epid not in map_list(_epid, verified_propers): if epid not in list(map(_epid, verified_propers)):
logger.log('Proper may be useful [%s]' % cur_proper.name) logger.log('Proper may be useful [%s]' % cur_proper.name)
verified_propers.add(cur_proper) verified_propers.add(cur_proper)
else: else:
# use Proper with the highest level # use Proper with the highest level
remove_propers = set() remove_propers = set()
map_consume(lambda vp: remove_propers.add(vp), map_consume(lambda vp: remove_propers.add(vp),
filter_iter(lambda p: (epid == _epid(p) and cur_proper.proper_level > p.proper_level), filter(lambda p: (epid == _epid(p) and cur_proper.proper_level > p.proper_level),
verified_propers)) verified_propers))
if remove_propers: if remove_propers:
verified_propers -= remove_propers verified_propers -= remove_propers
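The loop above keeps, per episode id, only the Proper with the highest proper_level; a condensed sketch of that selection using a throwaway namedtuple (the real Proper class carries more fields):

    from collections import namedtuple
    Proper = namedtuple('Proper', 'epid proper_level date name')

    candidates = [Proper('s01e01', 1, 1, 'x.PROPER'), Proper('s01e01', 2, 2, 'x.REPACK')]
    verified = set()
    for cur in sorted(candidates, key=lambda p: (p.proper_level, p.date), reverse=True):
        if cur.epid not in list(map(lambda p: p.epid, verified)):
            verified.add(cur)
        else:
            # discard any already-kept Proper the current one outranks
            verified -= set(filter(lambda p: cur.epid == p.epid
                                   and cur.proper_level > p.proper_level, verified))
    assert {p.name for p in verified} == {'x.REPACK'}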

View file

@ -22,7 +22,6 @@ from .newznab import NewznabConstants
from .. import logger from .. import logger
import sickgear import sickgear
from _23 import filter_list, filter_iter
from six import iteritems, itervalues from six import iteritems, itervalues
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -50,7 +49,7 @@ for module in __all__:
try: try:
m = importlib.import_module('.' + module, 'sickgear.providers') m = importlib.import_module('.' + module, 'sickgear.providers')
globals().update({n: getattr(m, n) for n in m.__all__} if hasattr(m, '__all__') globals().update({n: getattr(m, n) for n in m.__all__} if hasattr(m, '__all__')
else dict(filter_iter(lambda t: '_' != t[0][0], iteritems(m.__dict__)))) else dict(filter(lambda t: '_' != t[0][0], iteritems(m.__dict__))))
except ImportError as e: except ImportError as e:
if 'custom' != module[0:6]: if 'custom' != module[0:6]:
raise e raise e
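A side note on dict(filter(...)) above: filter yields the (name, value) pairs unchanged and dict() consumes them directly, so no intermediate list is needed to copy a module's public names. A py3 sketch, with vars(m).items() standing in for six's iteritems(m.__dict__):

    import math
    public = dict(filter(lambda t: '_' != t[0][0], vars(math).items()))
    assert 'pi' in public and '__name__' not in public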
@ -74,12 +73,12 @@ def sortedProviderList():
newList.append(providerDict[curModule]) newList.append(providerDict[curModule])
if not sickgear.PROVIDER_ORDER: if not sickgear.PROVIDER_ORDER:
nzb = filter_list(lambda p: p.providerType == generic.GenericProvider.NZB, itervalues(providerDict)) nzb = list(filter(lambda p: p.providerType == generic.GenericProvider.NZB, itervalues(providerDict)))
tor = filter_list(lambda p: p.providerType != generic.GenericProvider.NZB, itervalues(providerDict)) tor = list(filter(lambda p: p.providerType != generic.GenericProvider.NZB, itervalues(providerDict)))
newList = sorted(filter_iter(lambda p: not p.anime_only, nzb), key=lambda v: v.get_id()) + \ newList = sorted(filter(lambda p: not p.anime_only, nzb), key=lambda v: v.get_id()) + \
sorted(filter_iter(lambda p: not p.anime_only, tor), key=lambda v: v.get_id()) + \ sorted(filter(lambda p: not p.anime_only, tor), key=lambda v: v.get_id()) + \
sorted(filter_iter(lambda p: p.anime_only, nzb), key=lambda v: v.get_id()) + \ sorted(filter(lambda p: p.anime_only, nzb), key=lambda v: v.get_id()) + \
sorted(filter_iter(lambda p: p.anime_only, tor), key=lambda v: v.get_id()) sorted(filter(lambda p: p.anime_only, tor), key=lambda v: v.get_id())
# add any modules that are missing from that list # add any modules that are missing from that list
for curModule in providerDict: for curModule in providerDict:
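The default order assembled above is four sorted slices concatenated: non-anime NZB, non-anime torrent, then their anime-only counterparts. Condensed with throwaway provider stubs:

    from collections import namedtuple
    Prov = namedtuple('Prov', 'pid ptype anime_only')
    provs = [Prov('b', 'nzb', False), Prov('a', 'torrent', True), Prov('c', 'nzb', False)]

    nzb = list(filter(lambda p: 'nzb' == p.ptype, provs))
    tor = list(filter(lambda p: 'nzb' != p.ptype, provs))
    ordered = sorted(filter(lambda p: not p.anime_only, nzb), key=lambda v: v.pid) + \
              sorted(filter(lambda p: not p.anime_only, tor), key=lambda v: v.pid) + \
              sorted(filter(lambda p: p.anime_only, nzb), key=lambda v: v.pid) + \
              sorted(filter(lambda p: p.anime_only, tor), key=lambda v: v.pid)
    assert [p.pid for p in ordered] == ['b', 'c', 'a']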
@ -119,7 +118,7 @@ def make_unique_list(p_list, d_list=None):
default_names = [d.name for d in d_list or []] default_names = [d.name for d in d_list or []]
p_list = filter_iter(lambda _x: _x.get_id() not in ['sick_beard_index'], p_list) p_list = filter(lambda _x: _x.get_id() not in ['sick_beard_index'], p_list)
for cur_p in p_list: for cur_p in p_list:
g_name = generic_provider_name(cur_p.name) g_name = generic_provider_name(cur_p.name)
g_url = generic_provider_url(cur_p.url) g_url = generic_provider_url(cur_p.url)
@ -139,7 +138,7 @@ def make_unique_list(p_list, d_list=None):
def getNewznabProviderList(data): def getNewznabProviderList(data):
# type: (AnyStr) -> List # type: (AnyStr) -> List
defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')] defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')]
providerList = make_unique_list(filter_list(lambda _x: _x, [makeNewznabProvider(x) for x in data.split('!!!')]), providerList = make_unique_list(list(filter(lambda _x: _x, [makeNewznabProvider(x) for x in data.split('!!!')])),
defaultList) defaultList)
providerDict = dict(zip([x.name for x in providerList], providerList)) providerDict = dict(zip([x.name for x in providerList], providerList))
@ -158,7 +157,7 @@ def getNewznabProviderList(data):
'server_type'): 'server_type'):
setattr(providerDict[curDefault.name], k, getattr(curDefault, k)) setattr(providerDict[curDefault.name], k, getattr(curDefault, k))
return filter_list(lambda _x: _x, providerList) return list(filter(lambda _x: _x, providerList))
def makeNewznabProvider(config_string): def makeNewznabProvider(config_string):
@ -189,9 +188,9 @@ def makeNewznabProvider(config_string):
def getTorrentRssProviderList(data): def getTorrentRssProviderList(data):
providerList = filter_list(lambda _x: _x, [makeTorrentRssProvider(x) for x in data.split('!!!')]) providerList = list(filter(lambda _x: _x, [makeTorrentRssProvider(x) for x in data.split('!!!')]))
return filter_list(lambda _x: _x, providerList) return list(filter(lambda _x: _x, providerList))
def makeTorrentRssProvider(config_string): def makeTorrentRssProvider(config_string):
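On the lambda _x: _x predicate kept above: filter() treats None as the identity predicate, so the same falsy-entry cleanup can be written without a lambda. A tiny check:

    providers = [None, 'alpha', '', 'beta']   # made-up entries
    assert list(filter(None, providers)) == list(filter(lambda _x: _x, providers))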

View file

@ -25,7 +25,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -63,7 +62,6 @@ class AlphaRatioProvider(generic.TorrentProvider):
rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'view', 'get': 'download'})]) rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'view', 'get': 'download'})])
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % (search_string, ('&freetorrent=1', '')[not self.freeleech]) search_url = self.urls['search'] % (search_string, ('&freetorrent=1', '')[not self.freeleech])
html = self.get_url(search_url) html = self.get_url(search_url)
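These unidecode() calls (here and in the provider files that follow) could simply be deleted because, per the commit summary, the _23 wrapper was already empty on py3; in effect it was an identity function:

    def unidecode(str_obj):
        # assumption from the commit summary: 'empty wrapper on py3'
        return str_obj

    assert unidecode('Ålborg S01') == 'Ålborg S01'   # removal is behavior-neutral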

View file

@ -23,7 +23,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -67,7 +66,6 @@ class BitHDTVProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % (search_string, self._categories_string(mode, '%s', ',')) search_url = self.urls['search'] % (search_string, self._categories_string(mode, '%s', ','))
html = self.get_url(search_url, timeout=90) html = self.get_url(search_url, timeout=90)

View file

@ -25,7 +25,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import filter_iter, unidecode
from six import iteritems from six import iteritems
@ -107,7 +106,6 @@ class BlutopiaProvider(generic.TorrentProvider):
return results return results
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % ( search_url = self.urls['search'] % (
self._token, search_string.replace('.', ' '), self._categories_string(template=''), '', '', '') self._token, search_string.replace('.', ' '), self._categories_string(template=''), '', '', '')
@ -136,7 +134,7 @@ class BlutopiaProvider(generic.TorrentProvider):
marked = ','.join([x.attrs.get('data-original-title', '').lower() for x in tr.find_all( marked = ','.join([x.attrs.get('data-original-title', '').lower() for x in tr.find_all(
'i', attrs={'class': ['text-gold', 'fa-diamond', 'fa-certificate']})]) 'i', attrs={'class': ['text-gold', 'fa-diamond', 'fa-certificate']})])
# noinspection PyTypeChecker # noinspection PyTypeChecker
munged = ''.join(filter_iter(marked.__contains__, ['free', 'double', 'feat'])) munged = ''.join(filter(marked.__contains__, ['free', 'double', 'feat']))
# noinspection PyUnboundLocalVariable # noinspection PyUnboundLocalVariable
if ((non_marked and rc['filter'].search(munged)) or if ((non_marked and rc['filter'].search(munged)) or
(not non_marked and not rc['filter'].search(munged))): (not non_marked and not rc['filter'].search(munged))):

View file

@ -32,7 +32,6 @@ from bs4_parser import BS4Parser
from exceptions_helper import AuthException from exceptions_helper import AuthException
from json_helper import json_dumps from json_helper import json_dumps
from _23 import unidecode
from six import iteritems from six import iteritems
@ -201,7 +200,6 @@ class BTNProvider(generic.TorrentProvider):
del (self.session.headers['Referer']) del (self.session.headers['Referer'])
self.auth_html = True self.auth_html = True
search_string = unidecode(search_string)
search_url = self.urls['search'] % (search_string, self._categories_string(mode, 'filter_cat[%s]=1')) search_url = self.urls['search'] % (search_string, self._categories_string(mode, 'filter_cat[%s]=1'))
html = self.get_url(search_url, use_tmr_limit=False) html = self.get_url(search_url, use_tmr_limit=False)

View file

@ -23,7 +23,7 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import b64decodestring, unidecode from _23 import b64decodestring
from six import iteritems from six import iteritems
@ -62,7 +62,6 @@ class EztvProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['browse'] % search_string if 'Cache' == mode else \ search_url = self.urls['browse'] % search_string if 'Cache' == mode else \
self.urls['search'] % search_string.replace('.', ' ') self.urls['search'] % search_string.replace('.', ' ')

View file

@ -25,7 +25,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
FLTAG = r'</a>\s+<img[^>]+%s[^<]+<br' FLTAG = r'</a>\s+<img[^>]+%s[^<]+<br'
@ -78,7 +77,6 @@ class FanoProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|')) rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|'))
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_string = '+'.join(rc['abd'].sub(r'%22\1%22', search_string).split()) search_string = '+'.join(rc['abd'].sub(r'%22\1%22', search_string).split())
search_url = self.urls['search'] % (search_string, self._categories_string(mode)) search_url = self.urls['search'] % (search_string, self._categories_string(mode))

View file

@ -23,7 +23,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -59,7 +58,6 @@ class FLProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|')) rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|'))
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
html = self.get_url(self.urls['search'] % ('+'.join(search_string.split()), html = self.get_url(self.urls['search'] % ('+'.join(search_string.split()),
self._categories_string(mode, template='cats[]=%s'))) self._categories_string(mode, template='cats[]=%s')))

View file

@ -24,7 +24,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -64,7 +63,6 @@ class FunFileProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|')) rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|'))
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % (self._categories_string(mode), search_string) search_url = self.urls['search'] % (self._categories_string(mode), search_string)
html = self.get_url(search_url, timeout=self.url_timeout) html = self.get_url(search_url, timeout=self.url_timeout)

View file

@ -49,8 +49,8 @@ from lxml_etree import etree
import requests import requests
import requests.cookies import requests.cookies
from _23 import decode_bytes, filter_list, filter_iter, make_btih, map_list, quote, quote_plus, urlparse from _23 import decode_bytes, make_btih, quote, quote_plus, urlparse
from six import iteritems, iterkeys, itervalues, PY2, string_types from six import iteritems, iterkeys, itervalues, string_types
from sg_helpers import try_int from sg_helpers import try_int
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -978,11 +978,6 @@ class GenericProvider(object):
def _link(self, url, url_tmpl=None, url_quote=None): def _link(self, url, url_tmpl=None, url_quote=None):
url = '%s' % url # ensure string type url = '%s' % url # ensure string type
if url and not re.match('(?i)magnet:', url): if url and not re.match('(?i)magnet:', url):
if PY2:
try:
url = url.encode('utf-8')
except (BaseException, Exception):
pass
url = url.strip().replace('&amp;', '&') url = url.strip().replace('&amp;', '&')
if not url: if not url:
url = '' url = ''
@ -1017,8 +1012,8 @@ class GenericProvider(object):
((any([cell.get_text()]) and any([rc[x].search(cell.get_text()) for x in iterkeys(rc)]) and cell.get_text()) ((any([cell.get_text()]) and any([rc[x].search(cell.get_text()) for x in iterkeys(rc)]) and cell.get_text())
or (cell.attrs.get('id') and any([rc[x].search(cell['id']) for x in iterkeys(rc)]) and cell['id']) or (cell.attrs.get('id') and any([rc[x].search(cell['id']) for x in iterkeys(rc)]) and cell['id'])
or (cell.attrs.get('title') and any([rc[x].search(cell['title']) for x in iterkeys(rc)]) and cell['title']) or (cell.attrs.get('title') and any([rc[x].search(cell['title']) for x in iterkeys(rc)]) and cell['title'])
or next(iter(set(filter_iter(lambda rz: any([rz]), [ or next(iter(set(filter(lambda rz: any([rz]), [
next(iter(set(filter_iter(lambda ry: any([ry]), [ next(iter(set(filter(lambda ry: any([ry]), [
cell.find(tag, **p) for p in [{attr: rc[x]} for x in iterkeys(rc)]]))), {}).get(attr) cell.find(tag, **p) for p in [{attr: rc[x]} for x in iterkeys(rc)]]))), {}).get(attr)
for (tag, attr) in [ for (tag, attr) in [
('img', 'title'), ('img', 'src'), ('i', 'title'), ('i', 'class'), ('img', 'title'), ('img', 'src'), ('i', 'title'), ('i', 'class'),
@ -1035,7 +1030,7 @@ class GenericProvider(object):
for k, r in iteritems(rc): for k, r in iteritems(rc):
if k not in results: if k not in results:
for name in filter_iter(lambda v: any([v]) and r.search(v), all_headers[::-1]): for name in filter(lambda v: any([v]) and r.search(v), all_headers[::-1]):
results[k] = all_headers.index(name) - len(all_headers) results[k] = all_headers.index(name) - len(all_headers)
break break
@ -1384,11 +1379,11 @@ class GenericProvider(object):
:param count: count of successfully processed items :param count: count of successfully processed items
:param url: source url of item(s) :param url: source url of item(s)
""" """
stats = map_list(lambda arg: ('_reject_%s' % arg[0], arg[1]), stats = list(map(lambda arg: ('_reject_%s' % arg[0], arg[1]),
filter_iter(lambda _arg: all([getattr(self, '_reject_%s' % _arg[0], None)]), filter(lambda _arg: all([getattr(self, '_reject_%s' % _arg[0], None)]),
(('seed', '%s <min seeders'), ('leech', '%s <min leechers'), (('seed', '%s <min seeders'), ('leech', '%s <min leechers'),
('notfree', '%s not freeleech'), ('unverified', '%s unverified'), ('notfree', '%s not freeleech'), ('unverified', '%s unverified'),
('container', '%s unwanted containers')))) ('container', '%s unwanted containers')))))
rejects = ', '.join([(text % getattr(self, attr, '')).strip() for attr, text in stats]) rejects = ', '.join([(text % getattr(self, attr, '')).strip() for attr, text in stats])
for (attr, _) in stats: for (attr, _) in stats:
setattr(self, attr, None) setattr(self, attr, None)
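The stats rewrite above first filters the reject reasons down to those whose _reject_* attribute is set, then maps them onto (attribute, message) pairs; condensed with a throwaway stand-in object:

    class Stub(object):            # hypothetical provider stand-in
        _reject_seed = 2
        _reject_leech = None

    self_ = Stub()
    stats = list(map(lambda arg: ('_reject_%s' % arg[0], arg[1]),
                     filter(lambda _arg: all([getattr(self_, '_reject_%s' % _arg[0], None)]),
                            (('seed', '%s <min seeders'), ('leech', '%s <min leechers')))))
    assert stats == [('_reject_seed', '%s <min seeders')]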
@ -1684,7 +1679,7 @@ class TorrentProvider(GenericProvider):
def _reject_item(self, seeders=0, leechers=0, freeleech=None, verified=None, container=None): def _reject_item(self, seeders=0, leechers=0, freeleech=None, verified=None, container=None):
reject = False reject = False
for condition, attr in filter_iter(lambda arg: all([arg[0]]), ( for condition, attr in filter(lambda arg: all([arg[0]]), (
(seeders < getattr(self, 'minseed', 0), 'seed'), (seeders < getattr(self, 'minseed', 0), 'seed'),
(leechers < getattr(self, 'minleech', 0), 'leech'), (leechers < getattr(self, 'minleech', 0), 'leech'),
(all([freeleech]), 'notfree'), (all([freeleech]), 'notfree'),
@ -1889,13 +1884,13 @@ class TorrentProvider(GenericProvider):
url_exclude = url_exclude or [] url_exclude = url_exclude or []
if url_exclude: if url_exclude:
urls = urls[1:] urls = urls[1:]
urls = filter_list(lambda u: u not in url_exclude, urls) urls = list(filter(lambda u: u not in url_exclude, urls))
break break
if not urls: if not urls:
setattr(sickgear, seen_attr, list(set(getattr(sickgear, seen_attr, []) + [self.__module__]))) setattr(sickgear, seen_attr, list(set(getattr(sickgear, seen_attr, []) + [self.__module__])))
if not urls: if not urls:
urls = filter_list(lambda uh: 'http' in uh, getattr(self, 'url_home', [])) urls = list(filter(lambda uh: 'http' in uh, getattr(self, 'url_home', [])))
return urls return urls
@ -1903,8 +1898,7 @@ class TorrentProvider(GenericProvider):
@staticmethod @staticmethod
def _decode(data, c): def _decode(data, c):
try: try:
fx = (lambda x: x, lambda x: str(x))[PY2] result = ''.join(chr(int(bytearray([(8 * c)[i] ^ x for i, x in enumerate(data)])[i:i + 2], 16))
result = ''.join(chr(int(fx(bytearray([(8 * c)[i] ^ x for i, x in enumerate(data)])[i:i + 2]), 16))
for i in range(0, len(data), 2)) for i in range(0, len(data), 2))
except (BaseException, Exception): except (BaseException, Exception):
result = '|' result = '|'
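The dropped fx shim existed because py2's int() could not parse a bytearray slice; py3's int() accepts bytes-like input with an explicit base, so the hex pairs decode directly. A minimal check of that property:

    pairs = bytearray(b'48692121')           # hex digits for 'Hi!!'
    decoded = ''.join(chr(int(pairs[i:i + 2], 16)) for i in range(0, len(pairs), 2))
    assert decoded == 'Hi!!'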
@ -1932,10 +1926,10 @@ class TorrentProvider(GenericProvider):
return url_base return url_base
url_list = self._decode_urls(url_exclude) url_list = self._decode_urls(url_exclude)
if not url_list and getattr(self, 'url_edit', None) or not any(filter_iter(lambda u: 10 < len(u), url_list)): if not url_list and getattr(self, 'url_edit', None) or not any(filter(lambda u: 10 < len(u), url_list)):
return None return None
url_list = map_list(lambda u: '%s/' % u.rstrip('/'), url_list) url_list = list(map(lambda u: '%s/' % u.rstrip('/'), url_list))
last_url, expire = sickgear.PROVIDER_HOMES.get(self.get_id(), ('', None)) last_url, expire = sickgear.PROVIDER_HOMES.get(self.get_id(), ('', None))
url_drop = (url_exclude or []) + getattr(self, 'url_drop', []) url_drop = (url_exclude or []) + getattr(self, 'url_drop', [])
if url_drop and any([url in last_url for url in url_drop]): # deprecate url if url_drop and any([url in last_url for url in url_drop]): # deprecate url
@ -1970,8 +1964,8 @@ class TorrentProvider(GenericProvider):
return cur_url return cur_url
seen_attr = 'PROVIDER_SEEN' seen_attr = 'PROVIDER_SEEN'
setattr(sickgear, seen_attr, filter_list(lambda u: self.__module__ not in u, setattr(sickgear, seen_attr, list(filter(lambda u: self.__module__ not in u,
getattr(sickgear, seen_attr, []))) getattr(sickgear, seen_attr, []))))
self.failure_count = 3 * bool(failure_count) self.failure_count = 3 * bool(failure_count)
if self.should_skip(): if self.should_skip():

View file

@ -25,7 +25,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -76,7 +75,6 @@ class HDSpaceProvider(generic.TorrentProvider):
log = '%sing (%s) ' % (('keep', 'skipp')[non_marked], ', '.join([self.may_filter[f][0] for f in filters])) log = '%sing (%s) ' % (('keep', 'skipp')[non_marked], ', '.join([self.may_filter[f][0] for f in filters]))
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['browse'] + self._categories_string(template='', delimiter=';') search_url = self.urls['browse'] + self._categories_string(template='', delimiter=';')
if 'Cache' != mode: if 'Cache' != mode:

View file

@ -25,7 +25,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -80,7 +79,6 @@ class HDTorrentsProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
rc['cats'] = re.compile('(?i)category=(?:%s)' % self._categories_string(mode, template='', delimiter='|')) rc['cats'] = re.compile('(?i)category=(?:%s)' % self._categories_string(mode, template='', delimiter='|'))
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % ( search_url = self.urls['search'] % (
search_string, search_string,
self._categories_string(mode, template='category[]=%s') self._categories_string(mode, template='category[]=%s')

View file

@ -27,7 +27,6 @@ from ..helpers import try_int
import exceptions_helper import exceptions_helper
import feedparser import feedparser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -72,7 +71,6 @@ class ImmortalSeedProvider(generic.TorrentProvider):
'size': r'size[^\d/]+([^/]+)', 'get': '(.*download.*)', 'title': r'NUKED\b\.(.*)$'})]) 'size': r'size[^\d/]+([^/]+)', 'get': '(.*download.*)', 'title': r'NUKED\b\.(.*)$'})])
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_string = search_string.replace(' ', '.') search_string = search_string.replace(' ', '.')
search_url = self.urls['search'] % ( search_url = self.urls['search'] % (

View file

@ -24,7 +24,7 @@ from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import b64decodestring, unidecode from _23 import b64decodestring
from six import iteritems from six import iteritems
@ -77,7 +77,6 @@ class IPTorrentsProvider(generic.TorrentProvider):
urls = [] urls = []
for search_string in search_params[mode]: for search_string in search_params[mode]:
urls += [[]] urls += [[]]
search_string = unidecode(search_string) or search_string
for page in range((3, 5)['Cache' == mode])[1:]: for page in range((3, 5)['Cache' == mode])[1:]:
# URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
urls[-1] += [self.urls['search'] % ( urls[-1] += [self.urls['search'] % (

View file

@ -24,7 +24,7 @@ from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import b64decodestring, quote_plus, unidecode from _23 import b64decodestring, quote_plus
class LimeTorrentsProvider(generic.TorrentProvider): class LimeTorrentsProvider(generic.TorrentProvider):
@ -61,8 +61,6 @@ class LimeTorrentsProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['browse'] if 'Cache' == mode \ search_url = self.urls['browse'] if 'Cache' == mode \
else self.urls['search'] % (quote_plus(search_string)) else self.urls['search'] % (quote_plus(search_string))

View file

@ -24,7 +24,6 @@ from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -53,7 +52,6 @@ class MagnetDLProvider(generic.TorrentProvider):
for search_string in search_params[mode]: for search_string in search_params[mode]:
urls = [self.urls['browse'], self.urls['browse'] + '2'] urls = [self.urls['browse'], self.urls['browse'] + '2']
if 'Cache' != mode: if 'Cache' != mode:
search_string = unidecode(search_string)
urls = [self.urls['search'] % re.sub(r'[.\s]+', ' ', search_string)] urls = [self.urls['search'] % re.sub(r'[.\s]+', ' ', search_string)]
html = '' html = ''

View file

@ -23,8 +23,6 @@ from . import generic
from .. import logger from .. import logger
from ..helpers import try_int from ..helpers import try_int
from _23 import unidecode
class MilkieProvider(generic.TorrentProvider): class MilkieProvider(generic.TorrentProvider):
@ -71,7 +69,6 @@ class MilkieProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = '' search_url = ''
data_json, sess = None, None data_json, sess = None, None

View file

@ -26,7 +26,6 @@ from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -67,7 +66,7 @@ class MoreThanProvider(generic.TorrentProvider):
for (k, v) in iteritems({'info': r'torrents.php\?id', 'get': 'download', 'nuked': 'nuked'})]) for (k, v) in iteritems({'info': r'torrents.php\?id', 'get': 'download', 'nuked': 'nuked'})])
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string).replace('.', ' ') search_string = search_string.replace('.', ' ')
search_url = self.urls['search'] % (search_string, search_url = self.urls['search'] % (search_string,
self._categories_string(mode, template='filter_cat[%s]=1')) self._categories_string(mode, template='filter_cat[%s]=1'))

View file

@ -26,7 +26,6 @@ from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -66,7 +65,6 @@ class NcoreProvider(generic.TorrentProvider):
'list': '.*?torrent_all', 'info': 'details', 'key': 'key=([^"]+)">Torrent let'})]) 'list': '.*?torrent_all', 'info': 'details', 'key': 'key=([^"]+)">Torrent let'})])
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % search_string search_url = self.urls['search'] % search_string
# fetches 15 results by default, and up to 100 if allowed in user profile # fetches 15 results by default, and up to 100 if allowed in user profile

View file

@ -25,7 +25,7 @@ from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from json_helper import json_dumps from json_helper import json_dumps
from _23 import filter_list, unidecode, unquote_plus from _23 import unquote_plus
from six import iteritems from six import iteritems
@ -83,7 +83,6 @@ class NebulanceProvider(generic.TorrentProvider):
rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'nodots': r'[\.\s]+'})]) rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'nodots': r'[\.\s]+'})])
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['browse'] % (self.user_authkey, self.user_passkey) search_url = self.urls['browse'] % (self.user_authkey, self.user_passkey)
if 'Cache' != mode: if 'Cache' != mode:
@ -164,7 +163,7 @@ class NebulanceProvider(generic.TorrentProvider):
('(?i)%s(Proper)%s' % (bl, br), r'`\1`'), (r'%s\s*%s' % (bl, br), '`')]: ('(?i)%s(Proper)%s' % (bl, br), r'`\1`'), (r'%s\s*%s' % (bl, br), '`')]:
title = re.sub(r[0], r[1], title) title = re.sub(r[0], r[1], title)
grp = filter_list(lambda rn: '.release' in rn.lower(), item['tags']) grp = list(filter(lambda rn: '.release' in rn.lower(), item['tags']))
title = '%s%s-%s' % (('', t[0])[1 < len(t)], title, title = '%s%s-%s' % (('', t[0])[1 < len(t)], title,
(any(grp) and grp[0] or 'nogrp').upper().replace('.RELEASE', '')) (any(grp) and grp[0] or 'nogrp').upper().replace('.RELEASE', ''))
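The release-group pick above combines a filter with the old-style and/or conditional: the first tag containing '.release' wins, otherwise 'nogrp'. Sketch with a made-up tag list:

    tags = ['720p', 'Some.Release']
    grp = list(filter(lambda rn: '.release' in rn.lower(), tags))
    label = (any(grp) and grp[0] or 'nogrp').upper().replace('.RELEASE', '')
    assert label == 'SOME'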
@ -186,7 +185,7 @@ class NebulanceProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unquote_plus(unidecode(search_string)) search_string = unquote_plus(search_string)
params = {'release': search_string} params = {'release': search_string}
if 'Cache' == mode: if 'Cache' == mode:

View file

@ -22,7 +22,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -51,7 +50,6 @@ class NyaaProvider(generic.TorrentProvider):
rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'view', 'get': '(?:torrent|magnet:)'})]) rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'view', 'get': '(?:torrent|magnet:)'})])
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % ((0, 2)[self.confirmed], search_string.replace('.', ' ')) search_url = self.urls['search'] % ((0, 2)[self.confirmed], search_string.replace('.', ' '))
html = self.get_url(search_url) html = self.get_url(search_url)

View file

@ -23,7 +23,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -58,7 +57,6 @@ class PreToMeProvider(generic.TorrentProvider):
rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'details', 'get': 'download'})]) rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({'info': 'details', 'get': 'download'})])
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % search_string search_url = self.urls['search'] % search_string
html = self.get_url(search_url) html = self.get_url(search_url)

View file

@ -25,7 +25,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import filter_iter, unidecode
from six import iteritems from six import iteritems
@ -93,7 +92,6 @@ class PrivateHDProvider(generic.TorrentProvider):
return results return results
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % ( search_url = self.urls['search'] % (
'+'.join(search_string.split()), self._categories_string(mode, '')) '+'.join(search_string.split()), self._categories_string(mode, ''))
@ -120,7 +118,7 @@ class PrivateHDProvider(generic.TorrentProvider):
if any(self.filter): if any(self.filter):
marked = ','.join([x.attrs.get('title', '').lower() for x in tr.find_all( marked = ','.join([x.attrs.get('title', '').lower() for x in tr.find_all(
'i', attrs={'class': ['fa-star', 'fa-diamond', 'fa-star-half-o']})]) 'i', attrs={'class': ['fa-star', 'fa-diamond', 'fa-star-half-o']})])
munged = ''.join(filter_iter(marked.__contains__, ['free', 'half', 'double'])) munged = ''.join(filter(marked.__contains__, ['free', 'half', 'double']))
# noinspection PyUnboundLocalVariable # noinspection PyUnboundLocalVariable
if ((non_marked and rc['filter'].search(munged)) or if ((non_marked and rc['filter'].search(munged)) or
(not non_marked and not rc['filter'].search(munged))): (not non_marked and not rc['filter'].search(munged))):

View file

@ -26,7 +26,6 @@ from .. import logger
from ..helpers import anon_url, try_int from ..helpers import anon_url, try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -82,7 +81,6 @@ class PTFProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|')) rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|'))
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % ('+'.join(search_string.split()), self._categories_string(mode)) search_url = self.urls['search'] % ('+'.join(search_string.split()), self._categories_string(mode))
html = self.get_url(search_url) html = self.get_url(search_url)

View file

@ -23,7 +23,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -61,7 +60,6 @@ class RevTTProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|')) rc['cats'] = re.compile('(?i)cat=(?:%s)' % self._categories_string(mode, template='', delimiter='|'))
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
html = self.get_url(self.urls['search'] % ('+'.join(search_string.split()), html = self.get_url(self.urls['search'] % ('+'.join(search_string.split()),
self._categories_string(mode))) self._categories_string(mode)))

View file

@ -23,7 +23,6 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -62,7 +61,6 @@ class SceneHDProvider(generic.TorrentProvider):
'nuked': 'nuke', 'filter': 'free'})]) 'nuked': 'nuke', 'filter': 'free'})])
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
search_url = self.urls['search'] % (search_string, self._categories_string(mode, '%s', ',')) search_url = self.urls['search'] % (search_string, self._categories_string(mode, '%s', ','))
html = self.get_url(search_url, timeout=90) html = self.get_url(search_url, timeout=90)

View file

@ -23,7 +23,6 @@ from .. import logger
from ..helpers import anon_url, try_int from ..helpers import anon_url, try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode
from six import iteritems from six import iteritems
@ -70,7 +69,6 @@ class SceneTimeProvider(generic.TorrentProvider):
urls = [] urls = []
for search_string in search_params[mode]: for search_string in search_params[mode]:
urls += [[]] urls += [[]]
search_string = unidecode(search_string)
search_url = self.urls['search'] % (self._categories_string(), search_url = self.urls['search'] % (self._categories_string(),
'+'.join(search_string.replace('.', ' ').split()), '+'.join(search_string.replace('.', ' ').split()),
('', '&freeleech=on')[self.freeleech]) ('', '&freeleech=on')[self.freeleech])

View file

@ -26,7 +26,7 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import unidecode, unquote_plus from _23 import unquote_plus
from six import iteritems, text_type from six import iteritems, text_type
@ -75,7 +75,6 @@ class ShazbatProvider(generic.TorrentProvider):
if self.should_skip(): if self.should_skip():
return results return results
else: else:
search_string = unidecode(search_string)
search_string = search_string.replace(show_detail, '').strip() search_string = search_string.replace(show_detail, '').strip()
search_url = self.urls['search'] % search_string search_url = self.urls['search'] % search_string
html = self.get_url(search_url) html = self.get_url(search_url)

View file

@ -25,7 +25,7 @@ from .. import logger
from ..helpers import sanitize_scene_name from ..helpers import sanitize_scene_name
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import decode_str, filter_list, html_unescape, list_keys, list_values, unidecode from _23 import decode_str, html_unescape
from six import iteritems, iterkeys from six import iteritems, iterkeys
@ -51,11 +51,11 @@ class ShowRSSProvider(generic.TorrentProvider):
def logged_in(self, y): def logged_in(self, y):
if all([None is y or 'logout' in y, if all([None is y or 'logout' in y,
bool(filter_list(lambda c: 'remember_web_' in c, iterkeys(self.session.cookies)))]): bool(list(filter(lambda c: 'remember_web_' in c, iterkeys(self.session.cookies))))]):
if None is not y: if None is not y:
self.shows = dict(re.findall(r'<option value="(\d+)">(.*?)</option>', y)) self.shows = dict(re.findall(r'<option value="(\d+)">(.*?)</option>', y))
for k, v in iteritems(self.shows): for k, v in iteritems(self.shows):
self.shows[k] = sanitize_scene_name(html_unescape(unidecode(decode_str(v)))) self.shows[k] = sanitize_scene_name(html_unescape(decode_str(v)))
return True return True
return False return False
@ -74,13 +74,12 @@ class ShowRSSProvider(generic.TorrentProvider):
if 'Cache' == mode: if 'Cache' == mode:
search_url = self.urls['browse'] search_url = self.urls['browse']
else: else:
search_string = unidecode(search_string) show_name = list(filter(lambda x: x.lower() == re.sub(r'\s.*', '', search_string.lower()),
show_name = filter_list(lambda x: x.lower() == re.sub(r'\s.*', '', search_string.lower()), list(self.shows.values())))
list_values(self.shows))
if not show_name: if not show_name:
continue continue
search_url = self.urls['search'] % list_keys(self.shows)[ search_url = self.urls['search'] % list(self.shows)[
list_values(self.shows).index(show_name[0])] list(self.shows.values()).index(show_name[0])]
if search_url in urls: if search_url in urls:
continue continue
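The index lookup above pairs list(self.shows) (the keys) with list(self.shows.values()); that is safe because a py3 dict iterates keys and values in the same insertion order. Sketch:

    shows = {'101': 'Some Show', '202': 'Other Show'}   # id -> name, made up
    names = list(shows.values())
    show_id = list(shows)[names.index('Other Show')]
    assert show_id == '202'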

View file

@ -25,7 +25,7 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from json_helper import json_loads from json_helper import json_loads
from _23 import b64encodestring, filter_iter, map_list, quote, unidecode from _23 import b64encodestring, quote
from six import iteritems from six import iteritems
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
@ -74,7 +74,7 @@ class SnowflProvider(generic.TorrentProvider):
params = dict(token=token[0], ent=token[1]) params = dict(token=token[0], ent=token[1])
if 'Cache' != mode: if 'Cache' != mode:
params.update({'ss': quote_fx(unidecode(search_string))}) params.update({'ss': quote_fx(search_string)})
data_json = None data_json = None
vals = [i for i in range(3, 8)] vals = [i for i in range(3, 8)]
@ -92,13 +92,13 @@ class SnowflProvider(generic.TorrentProvider):
if self.should_skip(): if self.should_skip():
return results return results
for item in filter_iter(lambda di: re.match('(?i).*?(tv|television)', for item in filter(lambda di: re.match('(?i).*?(tv|television)',
di.get('type', '') or di.get('category', '')) di.get('type', '') or di.get('category', ''))
and (not self.confirmed or di.get('trusted') or di.get('verified')), and (not self.confirmed or di.get('trusted') or di.get('verified')),
data_json or {}): data_json or {}):
seeders, leechers, size = map_list(lambda arg: try_int( seeders, leechers, size = list(map(lambda arg: try_int(
*([item.get(arg[0]) if None is not item.get(arg[0]) else item.get(arg[1])]) * 2), *([item.get(arg[0]) if None is not item.get(arg[0]) else item.get(arg[1])]) * 2),
(('seeder', 'seed'), ('leecher', 'leech'), ('size', 'size'))) (('seeder', 'seed'), ('leecher', 'leech'), ('size', 'size'))))
if self._reject_item(seeders, leechers): if self._reject_item(seeders, leechers):
continue continue
title = item.get('name') or item.get('title') title = item.get('name') or item.get('title')
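The triple unpack above reads each stat from a primary key and falls back to a legacy key; the same get-with-fallback, minus the try_int wrapper, looks like:

    item = {'seed': 3, 'leecher': 1, 'size': 100}       # made-up result row
    seeders, leechers, size = list(map(
        lambda arg: item.get(arg[0]) if item.get(arg[0]) is not None else item.get(arg[1]),
        (('seeder', 'seed'), ('leecher', 'leech'), ('size', 'size'))))
    assert (seeders, leechers, size) == (3, 1, 100)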
@ -163,7 +163,7 @@ class SnowflProvider(generic.TorrentProvider):
else: else:
from sickgear import providers from sickgear import providers
if 'torlock' in url.lower(): if 'torlock' in url.lower():
prov = next(filter_iter(lambda p: 'torlock' == p.name.lower(), (filter_iter( prov = next(filter(lambda p: 'torlock' == p.name.lower(), (filter(
lambda sp: sp.providerType == self.providerType, providers.sortedProviderList())))) lambda sp: sp.providerType == self.providerType, providers.sortedProviderList()))))
state = prov.enabled state = prov.enabled
prov.enabled = True prov.enabled = True

View file

@ -21,7 +21,6 @@ from . import generic
from ..helpers import try_int from ..helpers import try_int
from six import string_types from six import string_types
from _23 import filter_list, map_list, unidecode
class SpeedAppProvider(generic.TorrentProvider): class SpeedAppProvider(generic.TorrentProvider):
@ -55,14 +54,15 @@ class SpeedAppProvider(generic.TorrentProvider):
self.perms_needed = self.perms self.perms_needed = self.perms
if isinstance(resp, dict) and isinstance(resp.get('scopes'), list): if isinstance(resp, dict) and isinstance(resp.get('scopes'), list):
self._authd = True self._authd = True
self.perms_needed = filter_list(lambda x: True is not x, [p in resp.get('scopes') or p for p in self.perms]) self.perms_needed = list(filter(lambda x: True is not x,
[p in resp.get('scopes') or p for p in self.perms]))
if not self.perms_needed: if not self.perms_needed:
self.categories = None self.categories = None
resp = self.get_url(self.urls['cats'], skip_auth=True, parse_json=True, headers=self.auth_header()) resp = self.get_url(self.urls['cats'], skip_auth=True, parse_json=True, headers=self.auth_header())
if isinstance(resp, list): if isinstance(resp, list):
categories = [category['id'] for category in filter_list( categories = [category['id'] for category in list(filter(
lambda c: isinstance(c.get('id'), int) and isinstance(c.get('name'), string_types) lambda c: isinstance(c.get('id'), int) and isinstance(c.get('name'), string_types)
and c.get('name').upper() in ('TV PACKS', 'TV HD', 'TV SD'), resp)] and c.get('name').upper() in ('TV PACKS', 'TV HD', 'TV SD'), resp))]
self.categories = {'Cache': categories, 'Episode': categories, 'Season': categories} self.categories = {'Cache': categories, 'Episode': categories, 'Season': categories}
return not any(self.perms_needed) return not any(self.perms_needed)
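The perms check above leans on short-circuiting: p in resp.get('scopes') or p evaluates to True for a granted permission and to the permission name otherwise, so filtering out the True entries leaves exactly the missing permissions:

    perms = ['t.read', 't.down', 'ch.read']
    scopes = ['t.read']                       # made-up grant list
    perms_needed = list(filter(lambda x: True is not x,
                               [p in scopes or p for p in perms]))
    assert perms_needed == ['t.down', 'ch.read']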
@ -81,7 +81,7 @@ class SpeedAppProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_url = self.urls['search'] % ( search_url = self.urls['search'] % (
unidecode(search_string), self._categories_string(mode, template='categories[]=%s')) search_string, self._categories_string(mode, template='categories[]=%s'))
data_json = self.get_url(search_url, skip_auth=True, parse_json=True, headers=self.auth_header()) data_json = self.get_url(search_url, skip_auth=True, parse_json=True, headers=self.auth_header())
if self.should_skip(): if self.should_skip():
@ -111,10 +111,10 @@ class SpeedAppProvider(generic.TorrentProvider):
('%s_api_key_tip' % self.get_id()) == key and \ ('%s_api_key_tip' % self.get_id()) == key and \
((not_authd or self.perms_needed) ((not_authd or self.perms_needed)
and ('create token at <a href="%sprofile/api-tokens">%s site</a><br>' and ('create token at <a href="%sprofile/api-tokens">%s site</a><br>'
'with perms %s' % (self.url_base, self.name, map_list( 'with perms %s' % (self.url_base, self.name, list(map(
lambda p: 't.read' in p and 'Read torrents' lambda p: 't.read' in p and 'Read torrents'
or 't.down' in p and 'Download torrents' or 't.down' in p and 'Download torrents'
or 'ch.read' in p and 'Read snatches', self.perms_needed))) or 'ch.read' in p and 'Read snatches', self.perms_needed))))
.replace('[', '').replace(']', '') .replace('[', '').replace(']', '')
or 'token is valid and required permissions are enabled') \ or 'token is valid and required permissions are enabled') \
or '' or ''

View file

@ -25,7 +25,7 @@ from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from requests.cookies import cookiejar_from_dict from requests.cookies import cookiejar_from_dict
from _23 import filter_list, quote, unquote from _23 import quote, unquote
from six import string_types, iteritems from six import string_types, iteritems
@ -63,12 +63,12 @@ class SpeedCDProvider(generic.TorrentProvider):
self.session.cookies.clear() self.session.cookies.clear()
json = self.get_url(self.urls['login_1'], skip_auth=True, json = self.get_url(self.urls['login_1'], skip_auth=True,
post_data={'username': self.username}, parse_json=True) post_data={'username': self.username}, parse_json=True)
resp = filter_list(lambda l: isinstance(l, list), json.get('Fs', [])) resp = list(filter(lambda l: isinstance(l, list), json.get('Fs', [])))
def get_html(_resp): def get_html(_resp):
for cur_item in _resp: for cur_item in _resp:
if isinstance(cur_item, list): if isinstance(cur_item, list):
_html = filter_list(lambda s: isinstance(s, string_types) and 'password' in s, cur_item) _html = list(filter(lambda s: isinstance(s, string_types) and 'password' in s, cur_item))
if not _html: if not _html:
_html = get_html(cur_item) _html = get_html(cur_item)
if _html: if _html:
@ -128,13 +128,13 @@ class SpeedCDProvider(generic.TorrentProvider):
cnt = len(items[mode]) cnt = len(items[mode])
try: try:
html = filter_list(lambda l: isinstance(l, list), data_json.get('Fs', [])) html = list(filter(lambda l: isinstance(l, list), data_json.get('Fs', [])))
while html: while html:
if html and all(isinstance(x, string_types) for x in html): if html and all(isinstance(x, string_types) for x in html):
str_lengths = [len(x) for x in html] str_lengths = [len(x) for x in html]
html = html[str_lengths.index(max(str_lengths))] html = html[str_lengths.index(max(str_lengths))]
break break
html = filter_list(lambda l: isinstance(l, list), html) html = list(filter(lambda l: isinstance(l, list), html))
if html and 0 < len(html): if html and 0 < len(html):
html = html[0] html = html[0]
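The while loop above burrows through arbitrarily nested lists in the JSON reply until it reaches a flat list of strings, then keeps the longest string (the HTML payload). Condensed, with str in place of six's string_types:

    data_json = {'Fs': [1, ['a', ['tiny', '<table>much longer html</table>']]]}
    html = list(filter(lambda l: isinstance(l, list), data_json['Fs']))
    while html:
        if html and all(isinstance(x, str) for x in html):
            str_lengths = [len(x) for x in html]
            html = html[str_lengths.index(max(str_lengths))]
            break
        html = list(filter(lambda l: isinstance(l, list), html))
        if html and 0 < len(html):
            html = html[0]
    assert html.startswith('<table>')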

View file

@ -25,7 +25,7 @@ from .. import logger
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import b64decodestring, unidecode from _23 import b64decodestring
from six import iteritems from six import iteritems
@ -90,7 +90,6 @@ class ThePirateBayProvider(generic.TorrentProvider):
for mode in search_params: for mode in search_params:
for search_string in search_params[mode]: for search_string in search_params[mode]:
search_string = unidecode(search_string)
if 'Cache' != mode: if 'Cache' != mode:
search_url = self.urls['api'] % search_string search_url = self.urls['api'] % search_string

View file

@ -22,7 +22,7 @@ from .. import show_name_helpers, tvcache
from ..helpers import try_int from ..helpers import try_int
from bs4_parser import BS4Parser from bs4_parser import BS4Parser
from _23 import filter_list, map_list, urlencode from _23 import urlencode
from six import iteritems from six import iteritems
@ -78,10 +78,10 @@ class TokyoToshokanProvider(generic.TorrentProvider):
info = top.find('td', class_='desc-top') info = top.find('td', class_='desc-top')
title = info and re.sub(r'[ .]{2,}', '.', info.get_text().strip()) title = info and re.sub(r'[ .]{2,}', '.', info.get_text().strip())
links = info and map_list(lambda l: l.get('href', ''), info.find_all('a')) or None links = info and list(map(lambda l: l.get('href', ''), info.find_all('a'))) or None
download_url = self._link( download_url = self._link(
(filter_list(lambda l: 'magnet:' in l, links) (list(filter(lambda l: 'magnet:' in l, links))
or filter_list(lambda l: not re.search(r'(magnet:|\.se).+', l), links))[0]) or list(filter(lambda l: not re.search(r'(magnet:|\.se).+', l), links)))[0])
except (AttributeError, TypeError, ValueError, IndexError): except (AttributeError, TypeError, ValueError, IndexError):
continue continue
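The download pick above prefers a magnet URI and only falls back to the first plain link; condensed with made-up links:

    import re

    links = ['http://example.org/t/1', 'magnet:?xt=urn:btih:abc']
    pick = (list(filter(lambda l: 'magnet:' in l, links))
            or list(filter(lambda l: not re.search(r'(magnet:|\.se).+', l), links)))[0]
    assert pick.startswith('magnet:')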

View file

@@ -23,7 +23,7 @@ from .. import logger
 from ..helpers import try_int
 from bs4_parser import BS4Parser
-from _23 import b64decodestring, quote_plus, unidecode
+from _23 import b64decodestring, quote_plus
 from six import iteritems
@@ -66,8 +66,6 @@ class TorLockProvider(generic.TorrentProvider):
         for mode in search_params:
             for search_string in search_params[mode]:
-                search_string = unidecode(search_string)
                 search_url = self.urls['browse'] if 'Cache' == mode \
                     else self.urls['search'] % (quote_plus(search_string).replace('+', '-'))

View file

@@ -23,7 +23,6 @@ from .. import logger
 from ..helpers import try_int
 from bs4_parser import BS4Parser
-from _23 import unidecode
 from six import iteritems
@@ -67,7 +66,6 @@ class TorrentingProvider(generic.TorrentProvider):
             'get': 'download'})])
         for mode in search_params:
             for search_string in search_params[mode]:
-                search_string = unidecode(search_string)
                 search_url = self.urls['search'] % (self._categories_string(), search_string)
                 html = self.get_url(search_url)

View file

@@ -21,8 +21,7 @@ import re
 from . import generic
 from ..helpers import anon_url, try_int
-from _23 import unidecode
-from six import iteritems, PY2
+from six import iteritems
 class TorrentLeechProvider(generic.TorrentProvider):
@@ -66,7 +65,7 @@ class TorrentLeechProvider(generic.TorrentProvider):
             for page in range((3, 5)['Cache' == mode])[1:]:
                 urls[-1] += [self.urls[('search', 'browse')['Cache' == mode]] % {
                     'cats': self._categories_string(mode, '', ','),
-                    'query': unidecode(search_string) or search_string,
+                    'query': search_string,
                     'x': '%spage/%s' % (('facets/tags:FREELEECH/', '')[not self.freeleech], page)
                 }]
             results += self._search_urls(mode, last_recent_search, urls)
@@ -125,8 +124,7 @@ class TorrentLeechProvider(generic.TorrentProvider):
                         download_url = None
                         if dl and dl_id:
                             # noinspection PyUnresolvedReferences
-                            download_url = self._link('download/%s/%s' % (dl_id, dl),
-                                                      url_quote=PY2 and isinstance(dl, unicode) or None)
+                            download_url = self._link('download/%s/%s' % (dl_id, dl))
                     except (BaseException, Exception):
                         continue
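
The removed url_quote keyword only ever did work under py2: with six.PY2 false, `PY2 and isinstance(dl, unicode) or None` short-circuits before touching `unicode` (a name py3 no longer defines) and always lands on None. A sketch of that collapse:

    PY2 = False  # what six.PY2 reports on a py3 interpreter
    dl = 'Some.Show.S01E01'  # hypothetical release fragment

    # py3: the `and` short-circuits on False, so the `or` always yields None
    url_quote = PY2 and isinstance(dl, str) or None
    assert url_quote is None  # i.e. _link() was effectively never given the kwarg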

View file

@@ -27,7 +27,7 @@ from ..helpers import try_int
 from bs4_parser import BS4Parser
 from dateutil.parser import parse
-from _23 import unidecode, unquote_plus
+from _23 import unquote_plus
 from six import iteritems
@@ -80,7 +80,7 @@ class TVChaosUKProvider(generic.TorrentProvider):
             'info': r'/torrents?/(?P<tid>(?P<tid_num>\d{2,})[^"]*)', 'get': 'download'})])
         for mode in search_params:
             for search_string in search_params[mode]:
-                search_string = unidecode(unquote_plus(search_string))
+                search_string = unquote_plus(search_string)
                 vals = [i for i in range(5, 16)]
                 random.SystemRandom().shuffle(vals)
View file

@@ -25,7 +25,6 @@ from .. import logger
 from ..helpers import has_anime, try_int
 from bs4_parser import BS4Parser
-from _23 import unidecode
 from six import iteritems
@@ -70,7 +69,6 @@ class XspeedsProvider(generic.TorrentProvider):
             for search_string in search_params[mode]:
                 search_string = search_string.replace(u'£', '%')
                 search_string = re.sub(r'[\s.]+', '%', search_string)
-                search_string = unidecode(search_string)
                 kwargs = dict(post_data={'keywords': search_string, 'do': 'quick_sort', 'page': '0',
                                          'category': '0', 'search_type': 't_name', 'sort': 'added',

View file

@@ -35,8 +35,8 @@ from .sgdatetime import timestamp_near
 import lib.rarfile.rarfile as rarfile
-from _23 import filter_iter, list_range, map_iter
-from six import iteritems, PY2, text_type
+from _23 import list_range
+from six import iteritems, text_type
 # noinspection PyUnreachableCode
 if False:
@@ -303,7 +303,7 @@ def retrieve_exceptions():
                              list(cur_tvid_prodid))]
             # if this exception isn't already in the DB then add it
-            for cur_exception_dict in filter_iter(lambda e: e not in existing_exceptions, exception_dict[cur_tvid_prodid]):
+            for cur_exception_dict in filter(lambda e: e not in existing_exceptions, exception_dict[cur_tvid_prodid]):
                 try:
                     cur_exception, cur_season = next(iteritems(cur_exception_dict))
                 except (BaseException, Exception):
@@ -311,9 +311,6 @@ def retrieve_exceptions():
                     logger.log(traceback.format_exc(), logger.ERROR)
                     continue
-                if PY2 and not isinstance(cur_exception, text_type):
-                    cur_exception = text_type(cur_exception, 'utf-8', 'replace')
                 cl.append(['INSERT INTO scene_exceptions'
                            ' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
                            list(cur_tvid_prodid) + [cur_exception, cur_season]])
@@ -368,9 +365,6 @@ def update_scene_exceptions(tvid, prodid, scene_exceptions):
         exceptionsCache[(tvid, prodid)][cur_season].append(cur_exception)
-        if PY2 and not isinstance(cur_exception, text_type):
-            cur_exception = text_type(cur_exception, 'utf-8', 'replace')
         my_db.action('INSERT INTO scene_exceptions'
                      ' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
                      [tvid, prodid, cur_exception, cur_season])
@@ -489,7 +483,7 @@ def _anidb_exceptions_fetcher():
     if should_refresh('anidb'):
         logger.log(u'Checking for AniDB scene exception updates')
-        for cur_show_obj in filter_iter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList):
+        for cur_show_obj in filter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList):
             try:
                 anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True)
             except (BaseException, Exception):
@@ -559,8 +553,8 @@ def _xem_get_ids(infosrc_name, xem_origin):
                        % (task.lower() % ('', 's'), infosrc_name, url), logger.ERROR)
         else:
             if 'success' == parsed_json.get('result', '') and 'data' in parsed_json:
-                xem_ids = list(set(filter_iter(lambda prodid: 0 < prodid,
-                                               map_iter(lambda pid: helpers.try_int(pid), parsed_json['data']))))
+                xem_ids = list(set(filter(lambda prodid: 0 < prodid,
+                                          map(lambda pid: helpers.try_int(pid), parsed_json['data']))))
                 if 0 == len(xem_ids):
                     logger.log(u'Failed %s %s, no data items parsed from URL: %s'
                                % (task.lower() % ('', 's'), infosrc_name, url), logger.WARNING)
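
The xem_ids chain shows the general pattern used throughout this commit: inner map/filter stages can stay lazy because set() fully consumes them, and only the outermost container is materialised. A sketch with a made-up XEM payload and a stand-in for helpers.try_int:

    def try_int(v, default=0):
        # stand-in for helpers.try_int: best-effort int() with a fallback
        try:
            return int(v)
        except (TypeError, ValueError):
            return default

    parsed_json = {'data': ['71663', '0', 'junk', '253463', '71663']}  # fabricated ids

    xem_ids = list(set(filter(lambda prodid: 0 < prodid,
                              map(lambda pid: try_int(pid), parsed_json['data']))))
    assert [71663, 253463] == sorted(xem_ids)  # zero, junk and the duplicate drop out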

View file

@@ -32,8 +32,6 @@ from .helpers import try_int
 from .scene_exceptions import xem_ids_list
 from .sgdatetime import timestamp_near
-from _23 import filter_iter, map_list
 # noinspection PyUnreachableCode
 if False:
     from typing import Dict, List, Optional, Tuple, Union
@@ -718,8 +716,8 @@ def _get_absolute_numbering_for_show(tbl, tvid, prodid):
         """ % (tbl, ('indexer_id', 'showid')['tv_episodes' == tbl]), [int(tvid), int(prodid)])
     for cur_row in sql_result:
-        season, episode, abs_num = map_list(lambda x: try_int(cur_row[x], None),
-                                            ('season', 'episode', 'absolute_number'))
+        season, episode, abs_num = list(map(lambda x: try_int(cur_row[x], None),
+                                            ('season', 'episode', 'absolute_number')))
         if None is season and None is episode and None is not abs_num:
             season, episode, _ = _get_sea(tvid, prodid, absolute_number=abs_num)
@@ -815,7 +813,7 @@ def xem_refresh(tvid, prodid, force=False):
             return
         if 'success' in parsed_json['result']:
-            cl = map_list(lambda entry: [
+            cl = list(map(lambda entry: [
                 """
                 UPDATE tv_episodes
                 SET scene_season = ?, scene_episode = ?, scene_absolute_number = ?
@@ -824,7 +822,7 @@ def xem_refresh(tvid, prodid, force=False):
                  for v in ('season', 'episode', 'absolute')]
                 + [tvid, prodid]
                 + [entry.get(xem_origin).get(v) for v in ('season', 'episode')]
-            ], filter_iter(lambda x: 'scene' in x, parsed_json['data']))
+            ], filter(lambda x: 'scene' in x, parsed_json['data'])))
             if 0 < len(cl):
                 my_db = db.DBConnection()
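
Strictly, the three-way unpack would consume a bare map object just as well; the list(...) here keeps the line a drop-in replacement for the removed map_list helper. A sketch with a mock database row:

    def try_int(v, default=None):
        # stand-in for helpers.try_int
        try:
            return int(v)
        except (TypeError, ValueError):
            return default

    cur_row = {'season': '1', 'episode': None, 'absolute_number': '23'}  # mock sqlite3.Row

    season, episode, abs_num = list(map(lambda x: try_int(cur_row[x], None),
                                        ('season', 'episode', 'absolute_number')))
    assert (1, None, 23) == (season, episode, abs_num)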

View file

@@ -34,7 +34,6 @@ from .common import DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER, MULTI_
 from .providers.generic import GenericProvider
 from .tv import TVEpisode, TVShow
-from _23 import filter_list, filter_iter, list_values
 from six import iteritems, itervalues, string_types
 # noinspection PyUnreachableCode
@@ -590,7 +589,7 @@ def search_for_needed_episodes(ep_obj_list):
     orig_thread_name = threading.current_thread().name
-    providers = filter_list(lambda x: x.is_active() and x.enable_recentsearch, sickgear.providers.sortedProviderList())
+    providers = list(filter(lambda x: x.is_active() and x.enable_recentsearch, sickgear.providers.sortedProviderList()))
     for cur_provider in providers:
         threading.current_thread().name = '%s :: [%s]' % (orig_thread_name, cur_provider.name)
@@ -646,7 +645,7 @@ def search_for_needed_episodes(ep_obj_list):
         logger.log('Failed recent search of %s enabled provider%s. More info in debug log.' % (
             len(providers), helpers.maybe_plural(providers)), logger.ERROR)
-    return list_values(found_results)
+    return list(found_results.values())
 def can_reject(release_name):
@@ -738,10 +737,10 @@ def _search_provider_thread(provider, provider_results, show_obj, ep_obj_list, m
         # make a list of all the results for this provider
         for cur_search_result in search_result_list:
             # skip non-tv crap
-            search_result_list[cur_search_result] = filter_list(
+            search_result_list[cur_search_result] = list(filter(
                 lambda ep_item: ep_item.show_obj == show_obj and show_name_helpers.pass_wordlist_checks(
                     ep_item.name, parse=False, indexer_lookup=False, show_obj=ep_item.show_obj),
-                search_result_list[cur_search_result])
+                search_result_list[cur_search_result]))
             if cur_search_result in provider_results:
                 provider_results[cur_search_result] += search_result_list[cur_search_result]
@@ -941,7 +940,7 @@ def search_providers(
             # if not, break it apart and add them as the lowest priority results
             individual_results = nzbSplitter.splitResult(best_season_result)
-            for cur_result in filter_iter(
+            for cur_result in filter(
                     lambda r: r.show_obj == show_obj and show_name_helpers.pass_wordlist_checks(
                         r.name, parse=False, indexer_lookup=False, show_obj=r.show_obj), individual_results):
                 ep_num = None
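
list_values(d) existed because py3's dict.values() returns a live view rather than a list; the list(...) copy keeps the function's return value detached from later mutation of found_results. Sketch:

    found_results = {'provider_a': ['result-1'], 'provider_b': ['result-2']}  # toy data

    view = found_results.values()            # live view, tracks the dict
    snapshot = list(found_results.values())  # what the function now returns

    found_results['provider_c'] = ['result-3']
    assert 3 == len(view) and 2 == len(snapshot)  # the view grew, the copy did not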

View file

@@ -28,7 +28,6 @@ from .search import wanted_episodes
 from .sgdatetime import SGDatetime, timestamp_near
 from .tv import TVidProdid, TVEpisode, TVShow
-from _23 import filter_list, map_iter, map_list
 from six import iteritems, itervalues, moves
 # noinspection PyUnreachableCode
@@ -212,7 +211,7 @@ class BacklogSearcher(object):
         any_torrent_enabled = continued_backlog = False
         if not force and standard_backlog and (datetime.datetime.now() - datetime.datetime.fromtimestamp(
                 self._get_last_runtime())) < datetime.timedelta(hours=23):
-            any_torrent_enabled = any(map_iter(
+            any_torrent_enabled = any(map(
                 lambda x: x.is_active() and getattr(x, 'enable_backlog', None)
                           and GenericProvider.TORRENT == x.providerType,
                 sickgear.providers.sortedProviderList()))
@@ -291,8 +290,8 @@ class BacklogSearcher(object):
             if not runparts and parts:
                 runparts = parts[0]
-            wanted_list = filter_list(
-                lambda wi: wi and next(itervalues(wi))[0].show_obj.tvid_prodid in runparts, wanted_list)
+            wanted_list = list(filter(
+                lambda wi: wi and next(itervalues(wi))[0].show_obj.tvid_prodid in runparts, wanted_list))
         limited_wanted_list = []
         if standard_backlog and not any_torrent_enabled and runparts:
@@ -314,8 +313,8 @@ class BacklogSearcher(object):
             for i, l in enumerate(parts):
                 if 0 == i:
                     continue
-                cl += map_list(lambda m: ['INSERT INTO backlogparts (part, indexer, indexerid) VALUES (?,?,?)',
-                                          [i + 1] + TVidProdid(m).list], l)
+                cl += list(map(lambda m: ['INSERT INTO backlogparts (part, indexer, indexerid) VALUES (?,?,?)',
+                                          [i + 1] + TVidProdid(m).list], l))
             if 0 < len(cl):
                 my_db.mass_action(cl)
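
Note the deliberate asymmetry in this file: any(map(...)) keeps the bare iterator because any() short-circuits at the first truthy provider, whereas the wanted_list and cl sites must materialise since their results are re-filtered and extended afterwards. A sketch of the short-circuit, with a hypothetical predicate:

    calls = []

    def enabled_torrent(name):
        # hypothetical provider check; records each evaluation
        calls.append(name)
        return name.startswith('torrent')

    providers = ['torrent-a', 'nzb-b', 'torrent-c']

    assert any(map(enabled_torrent, providers))
    assert ['torrent-a'] == calls  # lazy map + any(): later providers never evaluated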

View file

@@ -34,8 +34,6 @@ from .classes import Proper, SimpleNamespace
 from .search import wanted_episodes, get_aired_in_season, set_wanted_aired
 from .tv import TVEpisode
-from _23 import filter_list
 # noinspection PyUnreachableCode
 if False:
     from typing import Any, AnyStr, Dict, List, Optional, Union
@@ -520,8 +518,8 @@ class RecentSearchQueueItem(generic_queue.QueueItem):
             orig_thread_name = threading.current_thread().name
             threads = []
-            providers = filter_list(lambda x: x.is_active() and x.enable_recentsearch,
-                                    sickgear.providers.sortedProviderList())
+            providers = list(filter(lambda x: x.is_active() and x.enable_recentsearch,
+                                    sickgear.providers.sortedProviderList()))
             for cur_provider in providers:
                 if not cur_provider.cache.should_update():
                     continue

View file

@@ -23,7 +23,7 @@ import sys
 import sickgear
 from dateutil import tz
-from six import integer_types, PY2, string_types
+from six import integer_types, string_types
 # noinspection PyUnreachableCode
 if False:
@@ -283,21 +283,14 @@ class SGDatetime(datetime.datetime):
         return (default, timestamp)[isinstance(timestamp, (float, integer_types))]
-if PY2:
-    """
-    Use `timestamp_near` for a timezone aware UTC timestamp in the near future or recent past.
-    Under py3, using the faster variable assigned cpython callable, so py2 is set up to mimic the signature types.
-    Note: the py3 callable is limited to datetime.datetime and does not work with datetime.date.
-    """
-    def _py2timestamp(dt=None):
-        # type: (datetime.datetime) -> float
-        try:
-            import time
-            return int(time.mktime(dt.timetuple()))
-        except (BaseException, Exception):
-            return 0
-    timestamp_near = _py2timestamp  # type: Callable[[datetime.datetime], float]
-else:
-    # py3 native timestamp uses milliseconds
-    timestamp_near = datetime.datetime.timestamp  # type: Callable[[datetime.datetime], float]
+# noinspection PyUnreachableCode
+if False:
+    # just to trick pycharm in correct type detection
+    def timestamp_near(d_t):
+        # type: (datetime.datetime) -> float
+        pass
+
+# py3 native timestamp uses milliseconds
+# noinspection PyRedeclaration
+timestamp_near = datetime.datetime.timestamp
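
The new binding points timestamp_near straight at the C-level datetime.datetime.timestamp, and the dead `if False:` stub exists purely so static analysis keeps a typed signature for the rebound name. One behavioural nuance: timestamp() returns a float with sub-second precision (what the inline comment calls milliseconds), where the old py2 fallback truncated via time.mktime. A sketch of the pattern:

    import datetime

    # noinspection PyUnreachableCode
    if False:
        # unreachable: only informs type checkers about the signature below
        def timestamp_near(d_t):
            # type: (datetime.datetime) -> float
            pass

    timestamp_near = datetime.datetime.timestamp  # rebind to the fast C callable

    dt = datetime.datetime(2023, 2, 11, 18, 2, 58)  # arbitrary example instant
    assert isinstance(timestamp_near(dt), float)  # float seconds, not a truncated int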

Some files were not shown because too many files have changed in this diff.