mirror of
https://github.com/SickGear/SickGear.git
synced 2025-01-05 17:43:37 +00:00
Add Fuzzywuzzy lib to sort search results.
Change remove search results filtering from tv info source. Change suppress startup warnings for Fuzzywuzzy and Cheetah libs. Change show name aliases get a score -1 to give the main names priority. Change replace findCertainShow with find_show_by_id for mapped multi-indexer. Change add Trakt info source search interface. Change directly send image after it's been cached. Fix loading CachedImages images with TVDB API v2 changes.
This commit is contained in:
parent
292312636e
commit
07d72e05f1
19 changed files with 1337 additions and 179 deletions
|
@ -32,6 +32,10 @@ import shutil
|
|||
import subprocess
|
||||
import time
|
||||
import threading
|
||||
import warnings
|
||||
|
||||
warnings.filterwarnings('ignore', module=r'.*fuzzywuzzy.*')
|
||||
warnings.filterwarnings('ignore', module=r'.*Cheetah.*')
|
||||
|
||||
if not (2, 7, 9) <= sys.version_info < (3, 0):
|
||||
print('Python %s.%s.%s detected.' % sys.version_info[:3])
|
||||
|
|
80
lib/fuzzywuzzy/StringMatcher.py
Normal file
80
lib/fuzzywuzzy/StringMatcher.py
Normal file
|
@ -0,0 +1,80 @@
|
|||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
"""
|
||||
StringMatcher.py
|
||||
|
||||
ported from python-Levenshtein
|
||||
[https://github.com/miohtama/python-Levenshtein]
|
||||
License available here: https://github.com/miohtama/python-Levenshtein/blob/master/COPYING
|
||||
"""
|
||||
|
||||
from Levenshtein import *
|
||||
from warnings import warn
|
||||
|
||||
|
||||
class StringMatcher:
    """A SequenceMatcher-like class built on top of python-Levenshtein.

    Exposes the difflib.SequenceMatcher interface (set_seqs, get_opcodes,
    get_matching_blocks, ratio, ...) while delegating the computation to the
    module-level Levenshtein functions.  Results are cached per sequence
    pair; a cache slot holding None means "not yet computed".
    """

    def _reset_cache(self):
        # None is the "not computed" sentinel.  Falsy results (distance 0,
        # ratio 0.0, empty editops) are valid cached values, so the getters
        # below compare against None instead of relying on truthiness.
        self._ratio = self._distance = None
        self._opcodes = self._editops = self._matching_blocks = None

    def __init__(self, isjunk=None, seq1='', seq2=''):
        if isjunk:
            # Accepted only for difflib signature compatibility.
            warn("isjunk is NOT implemented, it will be ignored")
        self._str1, self._str2 = seq1, seq2
        self._reset_cache()

    def set_seqs(self, seq1, seq2):
        self._str1, self._str2 = seq1, seq2
        self._reset_cache()

    def set_seq1(self, seq1):
        self._str1 = seq1
        self._reset_cache()

    def set_seq2(self, seq2):
        self._str2 = seq2
        self._reset_cache()

    def get_opcodes(self):
        if self._opcodes is None:
            if self._editops is not None:
                # Reuse previously computed editops when available.
                self._opcodes = opcodes(self._editops, self._str1, self._str2)
            else:
                self._opcodes = opcodes(self._str1, self._str2)
        return self._opcodes

    def get_editops(self):
        if self._editops is None:
            if self._opcodes is not None:
                # Reuse previously computed opcodes when available.
                self._editops = editops(self._opcodes, self._str1, self._str2)
            else:
                self._editops = editops(self._str1, self._str2)
        return self._editops

    def get_matching_blocks(self):
        if self._matching_blocks is None:
            self._matching_blocks = matching_blocks(self.get_opcodes(),
                                                    self._str1, self._str2)
        return self._matching_blocks

    def ratio(self):
        # BUG FIX: the original `if not self._ratio` treated a legitimate
        # 0.0 result as a cache miss and recomputed it on every call.
        if self._ratio is None:
            self._ratio = ratio(self._str1, self._str2)
        return self._ratio

    def quick_ratio(self):
        # This is usually quick enough :o)
        if self._ratio is None:
            self._ratio = ratio(self._str1, self._str2)
        return self._ratio

    def real_quick_ratio(self):
        # Cheap upper bound computed from the two lengths alone.
        len1, len2 = len(self._str1), len(self._str2)
        return 2.0 * min(len1, len2) / (len1 + len2)

    def distance(self):
        # Same falsy-cache fix as ratio(): distance 0 must stay cached.
        if self._distance is None:
            self._distance = distance(self._str1, self._str2)
        return self._distance
|
2
lib/fuzzywuzzy/__init__.py
Normal file
2
lib/fuzzywuzzy/__init__.py
Normal file
|
@ -0,0 +1,2 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
__version__ = '0.15.1'
|
325
lib/fuzzywuzzy/fuzz.py
Normal file
325
lib/fuzzywuzzy/fuzz.py
Normal file
|
@ -0,0 +1,325 @@
|
|||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
"""
|
||||
fuzz.py
|
||||
|
||||
Copyright (c) 2011 Adam Cohen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
import platform
|
||||
import warnings
|
||||
|
||||
try:
|
||||
from .StringMatcher import StringMatcher as SequenceMatcher
|
||||
except ImportError:
|
||||
if platform.python_implementation() != "PyPy":
|
||||
warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')
|
||||
from difflib import SequenceMatcher
|
||||
|
||||
from . import utils
|
||||
|
||||
|
||||
###########################
# Basic Scoring Functions #
###########################


@utils.check_for_none
@utils.check_empty_string
def ratio(s1, s2):
    """Return a plain 0-100 similarity score for the two sequences."""
    left, right = utils.make_type_consistent(s1, s2)
    matcher = SequenceMatcher(None, left, right)
    return utils.intr(100 * matcher.ratio())
|
||||
|
||||
|
||||
@utils.check_for_none
@utils.check_empty_string
def partial_ratio(s1, s2):
    """"Return the ratio of the most similar substring
    as a number between 0 and 100."""
    s1, s2 = utils.make_type_consistent(s1, s2)

    if len(s1) <= len(s2):
        shorter, longer = s1, s2
    else:
        shorter, longer = s2, s1

    matcher = SequenceMatcher(None, shorter, longer)

    # Each matching block (i, j, size) aligns shorter[i:] with longer[j:].
    # For every alignment, slide a window of len(shorter) over `longer`
    # starting at that offset and keep the best windowed ratio.
    # e.g. shorter = "abcd", longer = "XXXbcdeEEE", block = (1, 3, 3)
    # => best score === ratio("abcd", "Xbcd")
    best = 0.0
    for i, j, _size in matcher.get_matching_blocks():
        window_start = max(j - i, 0)
        window = longer[window_start:window_start + len(shorter)]

        score = SequenceMatcher(None, shorter, window).ratio()
        if score > .995:
            # Near-perfect window: short-circuit to a full score.
            return 100
        best = max(best, score)

    return utils.intr(100 * best)
|
||||
|
||||
|
||||
##############################
|
||||
# Advanced Scoring Functions #
|
||||
##############################
|
||||
|
||||
def _process_and_sort(s, force_ascii, full_process=True):
|
||||
"""Return a cleaned string with token sorted."""
|
||||
# pull tokens
|
||||
ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s
|
||||
tokens = ts.split()
|
||||
|
||||
# sort tokens and join
|
||||
sorted_string = u" ".join(sorted(tokens))
|
||||
return sorted_string.strip()
|
||||
|
||||
|
||||
# Sorted Token
# find all alphanumeric tokens in the string
# sort those tokens and take ratio of resulting joined strings
# controls for unordered string elements
@utils.check_for_none
def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True):
    left = _process_and_sort(s1, force_ascii, full_process=full_process)
    right = _process_and_sort(s2, force_ascii, full_process=full_process)
    score = partial_ratio if partial else ratio
    return score(left, right)


def token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
    """Return a measure of the sequences' similarity between 0 and 100,
    sorting the tokens before comparing.
    """
    return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)


def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
    """Return the ratio of the most similar substring as a number between
    0 and 100, sorting the tokens before comparing.
    """
    return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
|
||||
|
||||
|
||||
@utils.check_for_none
def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True):
    """Find all alphanumeric tokens in each string...
    - treat them as a set
    - construct two strings of the form:
        <sorted_intersection><sorted_remainder>
    - take ratios of those two strings
    - controls for unordered partial matches"""

    p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1
    p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2

    # Empty after processing -> no meaningful comparison.
    if not utils.validate_string(p1):
        return 0
    if not utils.validate_string(p2):
        return 0

    tokens1 = set(p1.split())
    tokens2 = set(p2.split())

    sorted_sect = " ".join(sorted(tokens1 & tokens2))
    sorted_1to2 = " ".join(sorted(tokens1 - tokens2))
    sorted_2to1 = " ".join(sorted(tokens2 - tokens1))

    combined_1to2 = (sorted_sect + " " + sorted_1to2).strip()
    combined_2to1 = (sorted_sect + " " + sorted_2to1).strip()
    sorted_sect = sorted_sect.strip()

    score = partial_ratio if partial else ratio
    return max(
        score(sorted_sect, combined_1to2),
        score(sorted_sect, combined_2to1),
        score(combined_1to2, combined_2to1),
    )


def token_set_ratio(s1, s2, force_ascii=True, full_process=True):
    return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)


def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True):
    return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
|
||||
|
||||
|
||||
###################
# Combination API #
###################

# q is for quick
def QRatio(s1, s2, force_ascii=True, full_process=True):
    """
    Quick ratio comparison between two strings.

    Runs full_process from utils on both strings.
    Short circuits if either of the strings is empty after processing.

    :param s1:
    :param s2:
    :param force_ascii: Allow only ASCII characters (Default: True)
    :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
    :return: similarity ratio
    """
    if full_process:
        p1 = utils.full_process(s1, force_ascii=force_ascii)
        p2 = utils.full_process(s2, force_ascii=force_ascii)
    else:
        p1, p2 = s1, s2

    if not (utils.validate_string(p1) and utils.validate_string(p2)):
        return 0

    return ratio(p1, p2)
|
||||
|
||||
|
||||
def UQRatio(s1, s2, full_process=True):
    """
    Unicode quick ratio: QRatio with force_ascii disabled.

    :param s1:
    :param s2:
    :return: similarity ratio
    """
    return QRatio(s1, s2, force_ascii=False, full_process=full_process)
|
||||
|
||||
|
||||
# w is for weighted
def WRatio(s1, s2, force_ascii=True, full_process=True):
    """
    Return a measure of the sequences' similarity between 0 and 100, using different algorithms.

    **Steps in the order they occur**

    #. Run full_process from utils on both strings
    #. Short circuit if this makes either string empty
    #. Take the ratio of the two processed strings (fuzz.ratio)
    #. Run checks to compare the length of the strings
        * If one of the strings is more than 1.5 times as long as the other
          use partial_ratio comparisons - scale partial results by 0.9
          (this makes sure only full results can return 100)
        * If one of the strings is over 8 times as long as the other
          instead scale by 0.6

    #. Run the other ratio functions
        * if using partial ratio functions call partial_ratio,
          partial_token_sort_ratio and partial_token_set_ratio
          scale all of these by the ratio based on length
        * otherwise call token_sort_ratio and token_set_ratio
        * all token based comparisons are scaled by 0.95
          (on top of any partial scalars)

    #. Take the highest value from these results
       round it and return it as an integer.

    :param s1:
    :param s2:
    :param force_ascii: Allow only ascii characters
    :type force_ascii: bool
    :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
    :return:
    """
    if full_process:
        p1 = utils.full_process(s1, force_ascii=force_ascii)
        p2 = utils.full_process(s2, force_ascii=force_ascii)
    else:
        p1, p2 = s1, s2

    if not utils.validate_string(p1):
        return 0
    if not utils.validate_string(p2):
        return 0

    unbase_scale = .95
    base = ratio(p1, p2)
    len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))

    # Similar lengths: plain token comparisons are reliable, skip partials.
    if len_ratio < 1.5:
        tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale
        tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale
        return utils.intr(max(base, tsor, tser))

    # One string is much longer: lean on partial matches, scaled down so
    # that only exact full matches can reach 100.  A very large length gap
    # (> 8x) gets an even smaller scale.
    partial_scale = .6 if len_ratio > 8 else .90

    partial = partial_ratio(p1, p2) * partial_scale
    ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \
        * unbase_scale * partial_scale
    ptser = partial_token_set_ratio(p1, p2, full_process=False) \
        * unbase_scale * partial_scale

    return utils.intr(max(base, partial, ptsor, ptser))
|
||||
|
||||
|
||||
def UWRatio(s1, s2, full_process=True):
    """Return a measure of the sequences' similarity between 0 and 100.
    Same as WRatio but preserving unicode (force_ascii=False).
    """
    return WRatio(s1, s2, force_ascii=False, full_process=full_process)
|
310
lib/fuzzywuzzy/process.py
Normal file
310
lib/fuzzywuzzy/process.py
Normal file
|
@ -0,0 +1,310 @@
|
|||
#!/usr/bin/env python
|
||||
# encoding: utf-8
|
||||
"""
|
||||
process.py
|
||||
|
||||
Copyright (c) 2011 Adam Cohen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
"""
|
||||
|
||||
from . import fuzz
|
||||
from . import utils
|
||||
import heapq
|
||||
import logging
|
||||
from functools import partial
|
||||
|
||||
|
||||
# Module-wide defaults for the extract* helpers.
default_scorer = fuzz.WRatio

default_processor = utils.full_process


def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    generator of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.

            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.

    Returns:
        Generator of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary

            {'bard': 'train', 'dog': 'man'}

        may return

            ('train', 22, 'bard'), ('man', 0, 'dog')
    """
    # Identity processor used whenever processing must be skipped.
    def no_process(x):
        return x

    # Short-circuit on an empty sized container.  BUG FIX: the original
    # `raise StopIteration` violates PEP 479 -- inside a generator it is
    # converted into RuntimeError on Python 3.7+; a plain `return` is the
    # correct way to end a generator early.
    try:
        if choices is None or len(choices) == 0:
            return
    except TypeError:
        # Generators have no len(); just iterate them below.
        pass

    # If the processor was removed by setting it to None
    # perform a noop as it still needs to be a function
    if processor is None:
        processor = no_process

    # Run the processor on the input query.
    processed_query = processor(query)

    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: \'{0}\']".format(query))

    # Don't run full_process twice
    if scorer in [fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio] \
            and processor == utils.full_process:
        processor = no_process

    # Only process the query once instead of for every choice
    if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in [fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = no_process
    processed_query = pre_processor(processed_query)

    try:
        # See if choices is a dictionary-like object.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # It's a list; just iterate over it.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)
|
||||
|
||||
|
||||
def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5):
    """Select the best matches in a list or dictionary of choices.

    Returns a best-first list of tuples of the match and its score:
    2-tuples for list choices, 3-tuples (including the key) for dictionary
    choices.  See extractWithoutOrder() for the meaning of query, choices,
    processor and scorer.

    Arguments:
        limit: Optional maximum for the number of elements returned.
            Defaults to 5; pass None for no limit.

    For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

    may return

        [('train', 22, 'bard'), ('man', 0, 'dog')]
    """
    matches = extractWithoutOrder(query, choices, processor, scorer)
    if limit is None:
        return sorted(matches, key=lambda pair: pair[1], reverse=True)
    return heapq.nlargest(limit, matches, key=lambda pair: pair[1])
|
||||
|
||||
|
||||
def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5):
    """Get a list of the best matches to a collection of choices.

    Convenience wrapper around extractWithoutOrder() for getting the
    choices with the best scores; same arguments, plus:

    Args:
        score_cutoff: Optional score threshold. No matches with a score
            less than this number will be returned. Defaults to 0.
        limit: Optional maximum for the number of elements returned.
            Defaults to 5; pass None for no limit.

    Returns: A best-first list of (match, score) tuples.
    """
    candidates = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    if limit is None:
        return sorted(candidates, key=lambda pair: pair[1], reverse=True)
    return heapq.nlargest(limit, candidates, key=lambda pair: pair[1])
|
||||
|
||||
|
||||
def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Find the single best match above a score in a list of choices.

    This is a convenience method which returns the single best choice.
    See extract() for the full arguments list.

    Args:
        score_cutoff: Optional score threshold. If the best match is found
            but does not score greater than this number, None is returned
            anyway ("not a good enough match"). Defaults to 0.

    Returns:
        A tuple containing a single match and its score when a match above
        score_cutoff exists; otherwise None.
    """
    candidates = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    try:
        return max(candidates, key=lambda pair: pair[1])
    except ValueError:
        # The generator was empty: nothing scored above the cutoff.
        return None
|
||||
|
||||
|
||||
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
    """Fuzzily remove duplicates from a list of strings.

    Uses process.extract to find, for every item, all items that score
    above *threshold* against it, then keeps the longest item of each
    duplicate group (assumed to carry the most entity information),
    breaking length ties with an alphabetical sort.

    Note: as the threshold DECREASES the number of duplicates that are
    found INCREASES, so the returned deduplicated list will likely be
    shorter. Raise the threshold for fuzzy_dedupe to be less sensitive.

    Args:
        contains_dupes: A list of strings that we would like to dedupe.
        threshold: the numerical value (0,100) point at which we expect to
            find duplicates. Defaults to 70 out of 100.
        scorer: Optional function for scoring matches of the form
            f(query, choice) -> int. By default, fuzz.token_set_ratio() is
            used and expects both query and choice to be strings.

    Returns:
        A deduplicated list. For example:

            In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
            In: fuzzy_dedupe(contains_dupes)
            Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
    """
    extractor = []

    for item in contains_dupes:
        # All fuzzy matches for this item, filtered by the threshold.
        matches = extract(item, contains_dupes, limit=None, scorer=scorer)
        filtered = [match for match in matches if match[1] > threshold]

        if len(filtered) == 1:
            # Only matched itself: no duplicates found for this item.
            extractor.append(filtered[0][0])
        else:
            # Canonical example: longest string wins, alphabetical tie-break.
            filtered = sorted(filtered, key=lambda x: x[0])
            filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
            extractor.append(filter_sort[0][0])

    # BUG FIX: the original built a temporary dict and returned dict.keys(),
    # which on Python 3 is a view object rather than the list the docstring
    # promises (and had no guaranteed order). dict.fromkeys dedupes while
    # preserving first-seen order, and list() gives callers a real list.
    extractor = list(dict.fromkeys(extractor))

    # If no duplicates were collapsed, return the original list untouched.
    if len(extractor) == len(contains_dupes):
        return contains_dupes
    return extractor
|
30
lib/fuzzywuzzy/string_processing.py
Normal file
30
lib/fuzzywuzzy/string_processing.py
Normal file
|
@ -0,0 +1,30 @@
|
|||
from __future__ import unicode_literals
import re
import string
import sys

PY3 = sys.version_info[0] == 3
if PY3:
    # On Python 3, plain str carries the strip/lower/upper methods that the
    # staticmethod aliases below need; rebind the name over the string module.
    string = str


class StringProcessor(object):
    """
    This class defines method to process strings in the most
    efficient way. Ideally all the methods below use unicode strings
    for both input and output.
    """

    # (?u) makes \W unicode-aware; each non-word character is handled
    # individually by sub() below.
    regex = re.compile(r"(?ui)\W")

    @classmethod
    def replace_non_letters_non_numbers_with_whitespace(cls, a_string):
        """Replace each character that is not a letter or a number with a
        single white space."""
        return cls.regex.sub(" ", a_string)

    strip = staticmethod(string.strip)
    to_lower_case = staticmethod(string.lower)
    to_upper_case = staticmethod(string.upper)
|
99
lib/fuzzywuzzy/utils.py
Normal file
99
lib/fuzzywuzzy/utils.py
Normal file
|
@ -0,0 +1,99 @@
|
|||
from __future__ import unicode_literals
|
||||
import sys
|
||||
import functools
|
||||
|
||||
from fuzzywuzzy.string_processing import StringProcessor
|
||||
|
||||
|
||||
PY3 = sys.version_info[0] == 3
|
||||
|
||||
|
||||
def validate_string(s):
    """
    Check that the input has a length and that the length is > 0.

    Objects without len() (e.g. None) are treated as invalid instead of
    raising.

    :param s:
    :return: True if len(s) > 0 else False
    """
    try:
        return bool(len(s))
    except TypeError:
        return False
|
||||
|
||||
|
||||
def check_for_none(func):
    """Decorator: return 0 immediately when either of the first two
    positional arguments is None, otherwise call the wrapped scorer."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if args[0] is None or args[1] is None:
            return 0
        return func(*args, **kwargs)
    return wrapper
|
||||
|
||||
|
||||
def check_empty_string(func):
    """Decorator: return 0 immediately when either of the first two
    positional arguments is empty, otherwise call the wrapped scorer."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if len(args[0]) == 0 or len(args[1]) == 0:
            return 0
        return func(*args, **kwargs)
    return wrapper
|
||||
|
||||
|
||||
# Every high-bit (non-ASCII) byte value, used to strip such characters
# outright.  ascii dammit!
bad_chars = str("").join([chr(i) for i in range(128, 256)])
if PY3:
    translation_table = dict((ord(c), None) for c in bad_chars)
    unicode = str


def asciionly(s):
    """Strip every non-ASCII character from *s*."""
    if PY3:
        return s.translate(translation_table)
    return s.translate(None, bad_chars)


def asciidammit(s):
    """Coerce any object to a best-effort ASCII-only text string."""
    if type(s) is str:
        return asciionly(s)
    if type(s) is unicode:
        return asciionly(s.encode('ascii', 'ignore'))
    return asciidammit(unicode(s))
|
||||
|
||||
|
||||
def make_type_consistent(s1, s2):
    """If both objects aren't either both string or unicode instances force them to unicode"""
    if isinstance(s1, str) and isinstance(s2, str):
        return s1, s2

    if isinstance(s1, unicode) and isinstance(s2, unicode):
        return s1, s2

    # Mixed or non-string inputs: coerce both sides.
    return unicode(s1), unicode(s2)
|
||||
|
||||
|
||||
def full_process(s, force_ascii=False):
|
||||
"""Process string by
|
||||
-- removing all but letters and numbers
|
||||
-- trim whitespace
|
||||
-- force to lower case
|
||||
if force_ascii == True, force convert to ascii"""
|
||||
|
||||
if s is None:
|
||||
return ""
|
||||
|
||||
if force_ascii:
|
||||
s = asciidammit(s)
|
||||
# Keep only Letters and Numbers (see Unicode docs).
|
||||
string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)
|
||||
# Force into lowercase.
|
||||
string_out = StringProcessor.to_lower_case(string_out)
|
||||
# Remove leading and trailing whitespaces.
|
||||
string_out = StringProcessor.strip(string_out)
|
||||
return string_out
|
||||
|
||||
|
||||
def intr(n):
|
||||
'''Returns a correctly rounded integer'''
|
||||
return int(round(n))
|
|
@ -1 +1,2 @@
|
|||
from trakt import TraktAPI
|
||||
from indexerapiinterface import TraktIndexer
|
||||
|
|
|
@ -1,10 +1,14 @@
|
|||
class TraktException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class TraktAuthException(TraktException):
|
||||
pass
|
||||
|
||||
|
||||
class TraktServerBusy(TraktException):
|
||||
pass
|
||||
class TraktException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class TraktAuthException(TraktException):
|
||||
pass
|
||||
|
||||
|
||||
class TraktServerBusy(TraktException):
|
||||
pass
|
||||
|
||||
|
||||
class TraktShowNotFound(TraktException):
|
||||
pass
|
177
lib/libtrakt/indexerapiinterface.py
Normal file
177
lib/libtrakt/indexerapiinterface.py
Normal file
|
@ -0,0 +1,177 @@
|
|||
import logging
|
||||
import re
|
||||
import time
|
||||
from .exceptions import TraktShowNotFound, TraktException
|
||||
from sickbeard.exceptions import ex
|
||||
from trakt import TraktAPI
|
||||
|
||||
|
||||
class ShowContainer(dict):
|
||||
"""Simple dict that holds a series of Show instances
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(ShowContainer, self).__init__(**kwargs)
|
||||
|
||||
self._stack = []
|
||||
self._lastgc = time.time()
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
|
||||
self._stack.append(key)
|
||||
|
||||
# keep only the 100th latest results
|
||||
if time.time() - self._lastgc > 20:
|
||||
for o in self._stack[:-100]:
|
||||
del self[o]
|
||||
|
||||
self._stack = self._stack[-100:]
|
||||
|
||||
self._lastgc = time.time()
|
||||
|
||||
super(ShowContainer, self).__setitem__(key, value)
|
||||
|
||||
|
||||
def log():
|
||||
return logging.getLogger('trakt_api')
|
||||
|
||||
|
||||
class TraktSearchTypes:
|
||||
text = 1
|
||||
trakt_id = 'trakt'
|
||||
tvdb_id = 'tvdb'
|
||||
imdb_id = 'imdb'
|
||||
tmdb_id = 'tmdb'
|
||||
tvrage_id = 'tvrage'
|
||||
all = [text, trakt_id, tvdb_id, imdb_id, tmdb_id, tvrage_id]
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
||||
class TraktResultTypes:
|
||||
show = 'show'
|
||||
episode = 'episode'
|
||||
movie = 'movie'
|
||||
person = 'person'
|
||||
list = 'list'
|
||||
all = [show, episode, movie, person, list]
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
||||
class TraktIndexer:
|
||||
# noinspection PyUnusedLocal
|
||||
# noinspection PyDefaultArgument
|
||||
def __init__(self, custom_ui=None, sleep_retry=None, search_type=TraktSearchTypes.text,
|
||||
result_types=[TraktResultTypes.show], *args, **kwargs):
|
||||
|
||||
self.config = {
|
||||
'apikey': '',
|
||||
'debug_enabled': False,
|
||||
'custom_ui': custom_ui,
|
||||
'proxy': None,
|
||||
'cache_enabled': False,
|
||||
'cache_location': '',
|
||||
'valid_languages': [],
|
||||
'langabbv_to_id': {},
|
||||
'language': 'en',
|
||||
'base_url': '',
|
||||
'search_type': search_type if search_type in TraktSearchTypes.all else TraktSearchTypes.text,
|
||||
'sleep_retry': sleep_retry,
|
||||
'result_types': result_types if isinstance(result_types, list) and all(x in TraktResultTypes.all for x in result_types) else [TraktResultTypes.show],
|
||||
}
|
||||
|
||||
self.corrections = {}
|
||||
self.shows = ShowContainer()
|
||||
|
||||
def _get_series(self, series):
|
||||
"""This searches Trakt for the series name,
|
||||
If a custom_ui UI is configured, it uses this to select the correct
|
||||
series.
|
||||
"""
|
||||
all_series = self.search(series)
|
||||
if not isinstance(all_series, list):
|
||||
all_series = [all_series]
|
||||
|
||||
if 0 == len(all_series):
|
||||
log().debug('Series result returned zero')
|
||||
raise TraktShowNotFound('Show-name search returned zero results (cannot find show on TVDB)')
|
||||
|
||||
if None is not self.config['custom_ui']:
|
||||
log().debug('Using custom UI %s' % (repr(self.config['custom_ui'])))
|
||||
custom_ui = self.config['custom_ui']
|
||||
ui = custom_ui(config=self.config)
|
||||
|
||||
return ui.select_series(all_series)
|
||||
|
||||
return all_series
|
||||
|
||||
def __getitem__(self, key):
|
||||
"""Handles trakt_instance['seriesname'] calls.
|
||||
The dict index should be the show id
|
||||
"""
|
||||
if isinstance(key, tuple) and 2 == len(key):
|
||||
key = key[0]
|
||||
|
||||
self.config['searchterm'] = key
|
||||
selected_series = self._get_series(key)
|
||||
if isinstance(selected_series, dict):
|
||||
selected_series = [selected_series]
|
||||
|
||||
return selected_series
|
||||
|
||||
def __repr__(self):
|
||||
return str(self.shows)
|
||||
|
||||
def _clean_data(self, data):
|
||||
"""Cleans up strings, lists, dicts returned
|
||||
|
||||
Issues corrected:
|
||||
- Replaces & with &
|
||||
- Trailing whitespace
|
||||
"""
|
||||
if isinstance(data, list):
|
||||
return [self._clean_data(d) for d in data]
|
||||
if isinstance(data, dict):
|
||||
return {k: self._clean_data(v) for k, v in data.iteritems()}
|
||||
return data if not isinstance(data, (str, unicode)) else data.strip().replace(u'&', u'&')
|
||||
|
||||
@staticmethod
|
||||
def _dict_prevent_none(d, key, default):
|
||||
v = None
|
||||
if isinstance(d, dict):
|
||||
v = d.get(key, default)
|
||||
return (v, default)[None is v]
|
||||
|
||||
def search(self, series):
|
||||
if TraktSearchTypes.text != self.config['search_type']:
|
||||
url = '/search/%s/%s?type=%s&extended=full&limit=100' % (self.config['search_type'], series,
|
||||
','.join(self.config['result_types']))
|
||||
else:
|
||||
url = '/search/%s?query=%s&extended=full&limit=100' % (','.join(self.config['result_types']), series)
|
||||
filtered = []
|
||||
kwargs = {}
|
||||
if None is not self.config['sleep_retry']:
|
||||
kwargs['sleep_retry'] = self.config['sleep_retry']
|
||||
try:
|
||||
resp = TraktAPI().trakt_request(url, **kwargs)
|
||||
if len(resp):
|
||||
for d in resp:
|
||||
if isinstance(d, dict) and 'type' in d and d['type'] in self.config['result_types']:
|
||||
for k, v in d.iteritems():
|
||||
d[k] = self._clean_data(v)
|
||||
if 'show' in d and TraktResultTypes.show == d['type']:
|
||||
d.update(d['show'])
|
||||
del d['show']
|
||||
d['seriesname'] = self._dict_prevent_none(d, 'title', '')
|
||||
d['genres_list'] = d.get('genres', [])
|
||||
d['genres'] = ', '.join(['%s' % v for v in d.get('genres', []) or [] if v])
|
||||
d['firstaired'] = (d.get('first_aired') and
|
||||
re.sub(r'T.*$', '', str(d.get('first_aired'))) or d.get('year'))
|
||||
filtered.append(d)
|
||||
except TraktException as e:
|
||||
log().debug('Could not connect to Trakt service: %s' % ex(e))
|
||||
|
||||
return filtered
|
|
@ -6,7 +6,7 @@ import time
|
|||
import datetime
|
||||
from sickbeard import logger
|
||||
|
||||
from exceptions import TraktException, TraktAuthException # , TraktServerBusy
|
||||
from .exceptions import TraktException, TraktAuthException # , TraktServerBusy
|
||||
|
||||
|
||||
class TraktAccount:
|
||||
|
|
|
@ -174,9 +174,9 @@ class Show(dict):
|
|||
Search terms are converted to lower case (unicode) strings.
|
||||
|
||||
# Examples
|
||||
|
||||
|
||||
These examples assume t is an instance of Tvdb():
|
||||
|
||||
|
||||
>> t = Tvdb()
|
||||
>>
|
||||
|
||||
|
@ -347,6 +347,7 @@ class Tvdb:
|
|||
u'My Last Day'
|
||||
"""
|
||||
|
||||
# noinspection PyUnusedLocal
|
||||
def __init__(self,
|
||||
interactive=False,
|
||||
select_first=False,
|
||||
|
@ -363,7 +364,9 @@ class Tvdb:
|
|||
search_all_languages=False,
|
||||
apikey=None,
|
||||
dvdorder=False,
|
||||
proxy=None):
|
||||
proxy=None,
|
||||
*args,
|
||||
**kwargs):
|
||||
|
||||
"""interactive (True/False):
|
||||
When True, uses built-in console UI is used to select the correct show.
|
||||
|
@ -665,15 +668,18 @@ class Tvdb:
|
|||
else:
|
||||
self.shows[sid].data[key] = value
|
||||
|
||||
@staticmethod
|
||||
def _clean_data(data):
|
||||
"""Cleans up strings returned by TheTVDB.com
|
||||
def _clean_data(self, data):
|
||||
"""Cleans up strings, lists, dicts returned
|
||||
|
||||
Issues corrected:
|
||||
- Replaces & with &
|
||||
- Trailing whitespace
|
||||
"""
|
||||
return data if not isinstance(data, basestring) else data.strip().replace(u'&', u'&')
|
||||
if isinstance(data, list):
|
||||
return [self._clean_data(d) for d in data]
|
||||
if isinstance(data, dict):
|
||||
return {k: self._clean_data(v) for k, v in data.iteritems()}
|
||||
return data if not isinstance(data, (str, unicode)) else data.strip().replace(u'&', u'&')
|
||||
|
||||
def search(self, series):
|
||||
"""This searches TheTVDB.com for the series name
|
||||
|
@ -719,7 +725,7 @@ class Tvdb:
|
|||
log().debug('Interactively selecting show using ConsoleUI')
|
||||
ui = ConsoleUI(config=self.config)
|
||||
|
||||
return ui.selectSeries(all_series)
|
||||
return ui.select_series(all_series)
|
||||
|
||||
def _parse_banners(self, sid, img_list):
|
||||
banners = {}
|
||||
|
|
|
@ -13,14 +13,14 @@ A UI is a callback. A class, it's __init__ function takes two arguments:
|
|||
- log, which is Tvdb's logger instance (which uses the logging module). You can
|
||||
call log.info() log.warning() etc
|
||||
|
||||
It must have a method "selectSeries", this is passed a list of dicts, each dict
|
||||
It must have a method "select_series", this is passed a list of dicts, each dict
|
||||
contains the the keys "name" (human readable show name), and "sid" (the shows
|
||||
ID as on thetvdb.com). For example:
|
||||
|
||||
[{'name': u'Lost', 'sid': u'73739'},
|
||||
{'name': u'Lost Universe', 'sid': u'73181'}]
|
||||
|
||||
The "selectSeries" method must return the appropriate dict, or it can raise
|
||||
The "select_series" method must return the appropriate dict, or it can raise
|
||||
tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show
|
||||
cannot be found).
|
||||
|
||||
|
@ -29,7 +29,7 @@ A simple example callback, which returns a random series:
|
|||
>>> import random
|
||||
>>> from tvdb_ui import BaseUI
|
||||
>>> class RandomUI(BaseUI):
|
||||
... def selectSeries(self, allSeries):
|
||||
... def select_series(self, allSeries):
|
||||
... import random
|
||||
... return random.choice(allSeries)
|
||||
|
||||
|
@ -50,9 +50,11 @@ import warnings
|
|||
|
||||
from tvdb_exceptions import tvdb_userabort
|
||||
|
||||
|
||||
def log():
|
||||
return logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseUI:
|
||||
"""Default non-interactive UI, which auto-selects first results
|
||||
"""
|
||||
|
@ -64,8 +66,8 @@ class BaseUI:
|
|||
"The self.log attribute will be removed in the next version")
|
||||
self.log = logging.getLogger(__name__)
|
||||
|
||||
def selectSeries(self, allSeries):
|
||||
return allSeries[0]
|
||||
def select_series(self, all_series):
|
||||
return all_series[0]
|
||||
|
||||
|
||||
class ConsoleUI(BaseUI):
|
||||
|
@ -98,17 +100,17 @@ class ConsoleUI(BaseUI):
|
|||
extra
|
||||
)
|
||||
|
||||
def selectSeries(self, allSeries):
|
||||
self._displaySeries(allSeries)
|
||||
def select_series(self, all_series):
|
||||
self._displaySeries(all_series)
|
||||
|
||||
if len(allSeries) == 1:
|
||||
if len(all_series) == 1:
|
||||
# Single result, return it!
|
||||
print "Automatically selecting only result"
|
||||
return allSeries[0]
|
||||
return all_series[0]
|
||||
|
||||
if self.config['select_first'] is True:
|
||||
print "Automatically returning first search result"
|
||||
return allSeries[0]
|
||||
return all_series[0]
|
||||
|
||||
while True: # return breaks this loop
|
||||
try:
|
||||
|
@ -126,7 +128,7 @@ class ConsoleUI(BaseUI):
|
|||
if len(ans.strip()) == 0:
|
||||
# Default option
|
||||
log().debug('Default option, returning first series')
|
||||
return allSeries[0]
|
||||
return all_series[0]
|
||||
if ans == "q":
|
||||
log().debug('Got quit command (q)')
|
||||
raise tvdb_userabort("User aborted ('q' quit command)")
|
||||
|
@ -139,15 +141,15 @@ class ConsoleUI(BaseUI):
|
|||
print "# q - abort tvnamer"
|
||||
print "# Press return with no input to select first result"
|
||||
elif ans.lower() in ["a", "all"]:
|
||||
self._displaySeries(allSeries, limit = None)
|
||||
self._displaySeries(all_series, limit = None)
|
||||
else:
|
||||
log().debug('Unknown keypress %s' % (ans))
|
||||
else:
|
||||
log().debug('Trying to return ID: %d' % (selected_id))
|
||||
try:
|
||||
return allSeries[selected_id]
|
||||
return all_series[selected_id]
|
||||
except IndexError:
|
||||
log().debug('Invalid show number entered!')
|
||||
print "Invalid number (%s) selected!"
|
||||
self._displaySeries(allSeries)
|
||||
self._displaySeries(all_series)
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ CFG = None
|
|||
CONFIG_FILE = None
|
||||
|
||||
# This is the version of the config we EXPECT to find
|
||||
CONFIG_VERSION = 15
|
||||
CONFIG_VERSION = 16
|
||||
|
||||
# Default encryption version (0 for None)
|
||||
ENCRYPTION_VERSION = 0
|
||||
|
|
|
@ -19,7 +19,6 @@ import re
|
|||
import datetime
|
||||
|
||||
import sickbeard
|
||||
from lib.dateutil import parser
|
||||
from sickbeard.common import Quality
|
||||
from unidecode import unidecode
|
||||
|
||||
|
@ -72,20 +71,15 @@ class SearchResult:
|
|||
if self.provider is None:
|
||||
return 'Invalid provider, unable to print self'
|
||||
|
||||
myString = '%s @ %s\n' % (self.provider.name, self.url)
|
||||
myString += 'Extra Info:\n'
|
||||
for extra in self.extraInfo:
|
||||
myString += ' %s\n' % extra
|
||||
myString += 'Episode: %s\n' % self.episodes
|
||||
myString += 'Quality: %s\n' % Quality.qualityStrings[self.quality]
|
||||
myString += 'Name: %s\n' % self.name
|
||||
myString += 'Size: %s\n' % str(self.size)
|
||||
myString += 'Release Group: %s\n' % self.release_group
|
||||
|
||||
return myString
|
||||
|
||||
def fileName(self):
|
||||
return self.episodes[0].prettyName() + '.' + self.resultType
|
||||
return '\n'.join([
|
||||
'%s @ %s' % (self.provider.name, self.url),
|
||||
'Extra Info:',
|
||||
'\n'.join([' %s' % x for x in self.extraInfo]),
|
||||
'Episode: %s' % self.episodes,
|
||||
'Quality: %s' % Quality.qualityStrings[self.quality],
|
||||
'Name: %s' % self.name,
|
||||
'Size: %s' % self.size,
|
||||
'Release Group: %s' % self.release_group])
|
||||
|
||||
def get_data(self):
|
||||
if None is not self.get_data_func:
|
||||
|
@ -97,6 +91,7 @@ class SearchResult:
|
|||
return self.extraInfo[0]
|
||||
return None
|
||||
|
||||
|
||||
class NZBSearchResult(SearchResult):
|
||||
"""
|
||||
Regular NZB result with an URL to the NZB
|
||||
|
@ -122,7 +117,66 @@ class TorrentSearchResult(SearchResult):
|
|||
hash = None
|
||||
|
||||
|
||||
class AllShowsListUI:
|
||||
class ShowFilter(object):
|
||||
def __init__(self, config, log=None):
|
||||
self.config = config
|
||||
self.log = log
|
||||
self.bad_names = [re.compile('(?i)%s' % r) for r in (
|
||||
'[*]+\s*(?:403:|do not add|dupli[^s]+\s*(?:\d+|<a\s|[*])|inval)',
|
||||
'(?:inval|not? allow(ed)?)(?:[,\s]*period)?\s*[*]',
|
||||
'[*]+\s*dupli[^\s*]+\s*[*]+\s*(?:\d+|<a\s)',
|
||||
'\s(?:dupli[^s]+\s*(?:\d+|<a\s|[*]))'
|
||||
)]
|
||||
|
||||
def _is_bad_name(self, show):
|
||||
return isinstance(show, dict) and 'seriesname' in show and isinstance(show['seriesname'], (str, unicode)) \
|
||||
and any([x.search(show['seriesname']) for x in self.bad_names])
|
||||
|
||||
@staticmethod
|
||||
def _fix_firstaired(show):
|
||||
if 'firstaired' not in show:
|
||||
show['firstaired'] = '1900-01-01'
|
||||
|
||||
@staticmethod
|
||||
def _dict_prevent_none(d, key, default):
|
||||
v = None
|
||||
if isinstance(d, dict):
|
||||
v = d.get(key, default)
|
||||
return (v, default)[None is v]
|
||||
|
||||
@staticmethod
|
||||
def _fix_seriesname(show):
|
||||
if isinstance(show, dict) and 'seriesname' in show and isinstance(show['seriesname'], (str, unicode)):
|
||||
show['seriesname'] = ShowFilter._dict_prevent_none(show, 'seriesname', '').strip()
|
||||
|
||||
|
||||
class AllShowsNoFilterListUI(ShowFilter):
|
||||
"""
|
||||
This class is for indexer api. Used for searching, no filter or smart select
|
||||
"""
|
||||
|
||||
def __init__(self, config, log=None):
|
||||
super(AllShowsNoFilterListUI, self).__init__(config, log)
|
||||
|
||||
def select_series(self, all_series):
|
||||
search_results = []
|
||||
|
||||
# get all available shows
|
||||
if all_series:
|
||||
for cur_show in all_series:
|
||||
self._fix_seriesname(cur_show)
|
||||
if cur_show in search_results or self._is_bad_name(cur_show):
|
||||
continue
|
||||
|
||||
self._fix_firstaired(cur_show)
|
||||
|
||||
if cur_show not in search_results:
|
||||
search_results += [cur_show]
|
||||
|
||||
return search_results
|
||||
|
||||
|
||||
class AllShowsListUI(ShowFilter):
|
||||
"""
|
||||
This class is for indexer api. Instead of prompting with a UI to pick the
|
||||
desired result out of a list of shows it tries to be smart about it
|
||||
|
@ -130,35 +184,36 @@ class AllShowsListUI:
|
|||
"""
|
||||
|
||||
def __init__(self, config, log=None):
|
||||
self.config = config
|
||||
self.log = log
|
||||
super(AllShowsListUI, self).__init__(config, log)
|
||||
|
||||
def selectSeries(self, allSeries):
|
||||
def select_series(self, all_series):
|
||||
search_results = []
|
||||
|
||||
# get all available shows
|
||||
if allSeries:
|
||||
search_term = self.config.get('searchterm', '').lower()
|
||||
if all_series:
|
||||
search_term = self.config.get('searchterm', '').strip().lower()
|
||||
if search_term:
|
||||
# try to pick a show that's in my show list
|
||||
for cur_show in allSeries:
|
||||
if cur_show in search_results:
|
||||
for cur_show in all_series:
|
||||
self._fix_seriesname(cur_show)
|
||||
if cur_show in search_results or self._is_bad_name(cur_show):
|
||||
continue
|
||||
|
||||
seriesnames = []
|
||||
if 'seriesname' in cur_show:
|
||||
name = cur_show['seriesname'].lower()
|
||||
seriesnames += [name, unidecode(name.encode('utf-8').decode('utf-8'))]
|
||||
if 'aliasnames' in cur_show:
|
||||
name = cur_show['aliasnames'].lower()
|
||||
seriesnames += name.split('|') + unidecode(name.encode('utf-8').decode('utf-8')).split('|')
|
||||
if 'aliases' in cur_show:
|
||||
if isinstance(cur_show['aliases'], list):
|
||||
for a in cur_show['aliases']:
|
||||
name = a.strip().lower()
|
||||
seriesnames += [name, unidecode(name.encode('utf-8').decode('utf-8'))]
|
||||
elif isinstance(cur_show['aliases'], (str, unicode)):
|
||||
name = cur_show['aliases'].strip().lower()
|
||||
seriesnames += name.split('|') + unidecode(name.encode('utf-8').decode('utf-8')).split('|')
|
||||
|
||||
if search_term in set(seriesnames):
|
||||
if 'firstaired' not in cur_show:
|
||||
cur_show['firstaired'] = str(datetime.date.fromordinal(1))
|
||||
cur_show['firstaired'] = re.sub('([-]0{2})+', '', cur_show['firstaired'])
|
||||
fix_date = parser.parse(cur_show['firstaired'], fuzzy=True).date()
|
||||
cur_show['firstaired'] = fix_date.strftime('%Y-%m-%d')
|
||||
self._fix_firstaired(cur_show)
|
||||
|
||||
if cur_show not in search_results:
|
||||
search_results += [cur_show]
|
||||
|
@ -166,7 +221,7 @@ class AllShowsListUI:
|
|||
return search_results
|
||||
|
||||
|
||||
class ShowListUI:
|
||||
class ShowListUI(ShowFilter):
|
||||
"""
|
||||
This class is for tvdb-api. Instead of prompting with a UI to pick the
|
||||
desired result out of a list of shows it tries to be smart about it
|
||||
|
@ -174,20 +229,22 @@ class ShowListUI:
|
|||
"""
|
||||
|
||||
def __init__(self, config, log=None):
|
||||
self.config = config
|
||||
self.log = log
|
||||
super(ShowListUI, self).__init__(config, log)
|
||||
|
||||
def selectSeries(self, allSeries):
|
||||
def select_series(self, all_series):
|
||||
try:
|
||||
# try to pick a show that's in my show list
|
||||
for curShow in allSeries:
|
||||
for curShow in all_series:
|
||||
self._fix_seriesname(curShow)
|
||||
if self._is_bad_name(curShow):
|
||||
continue
|
||||
if filter(lambda x: int(x.indexerid) == int(curShow['id']), sickbeard.showList):
|
||||
return curShow
|
||||
except:
|
||||
except (StandardError, Exception):
|
||||
pass
|
||||
|
||||
# if nothing matches then return first result
|
||||
return allSeries[0]
|
||||
return all_series[0]
|
||||
|
||||
|
||||
class Proper:
|
||||
|
@ -214,7 +271,7 @@ class Proper:
|
|||
self.indexerid) + ' from ' + str(sickbeard.indexerApi(self.indexer).name)
|
||||
|
||||
|
||||
class ErrorViewer():
|
||||
class ErrorViewer:
|
||||
"""
|
||||
Keeps a static list of UIErrors to be displayed on the UI and allows
|
||||
the list to be cleared.
|
||||
|
@ -234,7 +291,7 @@ class ErrorViewer():
|
|||
ErrorViewer.errors = []
|
||||
|
||||
|
||||
class UIError():
|
||||
class UIError:
|
||||
"""
|
||||
Represents an error to be displayed in the web UI.
|
||||
"""
|
||||
|
@ -255,7 +312,7 @@ class OrderedDefaultdict(OrderedDict):
|
|||
args = args[1:]
|
||||
super(OrderedDefaultdict, self).__init__(*args, **kwargs)
|
||||
|
||||
def __missing__ (self, key):
|
||||
def __missing__(self, key):
|
||||
if self.default_factory is None:
|
||||
raise KeyError(key)
|
||||
self[key] = default = self.default_factory()
|
||||
|
@ -267,33 +324,38 @@ class OrderedDefaultdict(OrderedDict):
|
|||
|
||||
|
||||
class ImageUrlList(list):
|
||||
def __init__(self, iterable=None, max_age=30):
|
||||
def __init__(self, max_age=30):
|
||||
super(ImageUrlList, self).__init__()
|
||||
self.max_age = max_age
|
||||
|
||||
def add_url(self, url):
|
||||
self.remove_old()
|
||||
for x in self:
|
||||
if isinstance(x, (tuple, list)) and len(x) == 2 and url == x[0]:
|
||||
x = (x[0], datetime.datetime.now())
|
||||
cache_item = (url, datetime.datetime.now())
|
||||
for n, x in enumerate(self):
|
||||
if self._is_cache_item(x) and url == x[0]:
|
||||
self[n] = cache_item
|
||||
return
|
||||
self.append((url, datetime.datetime.now()))
|
||||
self.append(cache_item)
|
||||
|
||||
@staticmethod
|
||||
def _is_cache_item(item):
|
||||
return isinstance(item, (tuple, list)) and 2 == len(item)
|
||||
|
||||
def remove_old(self):
|
||||
age_limit = datetime.datetime.now() - datetime.timedelta(minutes=self.max_age)
|
||||
self[:] = [x for x in self if isinstance(x, (tuple, list)) and len(x) == 2 and x[1] > age_limit]
|
||||
self[:] = [x for x in self if self._is_cache_item(x) and age_limit < x[1]]
|
||||
|
||||
def __repr__(self):
|
||||
return str([x[0] for x in self if isinstance(x, (tuple, list)) and len(x) == 2])
|
||||
return str([x[0] for x in self if self._is_cache_item(x)])
|
||||
|
||||
def __contains__(self, y):
|
||||
def __contains__(self, url):
|
||||
for x in self:
|
||||
if isinstance(x, (tuple, list)) and len(x) == 2 and y == x[0]:
|
||||
if self._is_cache_item(x) and url == x[0]:
|
||||
return True
|
||||
return False
|
||||
|
||||
def remove(self, x):
|
||||
for v in self:
|
||||
if isinstance(v, (tuple, list)) and len(v) == 2 and v[0] == x:
|
||||
super(ImageUrlList, self).remove(v)
|
||||
def remove(self, url):
|
||||
for x in self:
|
||||
if self._is_cache_item(x) and url == x[0]:
|
||||
super(ImageUrlList, self).remove(x)
|
||||
break
|
||||
|
|
|
@ -449,7 +449,8 @@ class ConfigMigrator():
|
|||
12: 'Add "hevc" and some non-english languages to ignore words if not found',
|
||||
13: 'Change default dereferrer url to blank',
|
||||
14: 'Convert Trakt to multi-account',
|
||||
15: 'Transmithe.net rebranded Nebulance'}
|
||||
15: 'Transmithe.net rebranded Nebulance',
|
||||
16: 'Purge old cache image folders'}
|
||||
|
||||
def migrate_config(self):
|
||||
""" Calls each successive migration until the config is the same version as SG expects """
|
||||
|
@ -807,3 +808,18 @@ class ConfigMigrator():
|
|||
neb.search_fallback = bool(check_setting_int(self.config_obj, old_id_uc, old_id + '_search_fallback', 0))
|
||||
neb.seed_time = check_setting_int(self.config_obj, old_id_uc, old_id + '_seed_time', '')
|
||||
neb._seed_ratio = check_setting_str(self.config_obj, old_id_uc, old_id + '_seed_ratio', '')
|
||||
|
||||
# Migration v16: Purge old cache image folder name
|
||||
@staticmethod
|
||||
def _migrate_v16():
|
||||
if sickbeard.CACHE_DIR and ek.ek(os.path.isdir, sickbeard.CACHE_DIR):
|
||||
cache_default = sickbeard.CACHE_DIR
|
||||
dead_paths = ['anidb', 'imdb', 'trakt']
|
||||
for path in dead_paths:
|
||||
sickbeard.CACHE_DIR = '%s/images/%s' % (cache_default, path)
|
||||
helpers.clearCache(True)
|
||||
try:
|
||||
ek.ek(os.rmdir, sickbeard.CACHE_DIR)
|
||||
except OSError:
|
||||
pass
|
||||
sickbeard.CACHE_DIR = cache_default
|
||||
|
|
|
@ -1478,8 +1478,8 @@ def cleanup_cache():
|
|||
"""
|
||||
Delete old cached files
|
||||
"""
|
||||
delete_not_changed_in([ek.ek(os.path.join, sickbeard.CACHE_DIR, *x) for x in [
|
||||
('images', 'trakt'), ('images', 'imdb'), ('images', 'anidb')]])
|
||||
delete_not_changed_in([ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images', 'browse', 'thumb', x) for x in [
|
||||
'anidb', 'imdb', 'trakt', 'tvdb']])
|
||||
|
||||
|
||||
def delete_not_changed_in(paths, days=30, minutes=0):
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
from lib.tvdb_api.tvdb_api import Tvdb
|
||||
from lib.libtrakt.indexerapiinterface import TraktIndexer
|
||||
|
||||
INDEXER_TVDB = 1
|
||||
INDEXER_TVRAGE = 2
|
||||
|
@ -65,9 +66,9 @@ indexerConfig = {
|
|||
main_url='https://www.trakt.tv/',
|
||||
id=INDEXER_TRAKT,
|
||||
name='Trakt',
|
||||
module=None,
|
||||
module=TraktIndexer,
|
||||
api_params={},
|
||||
active=False,
|
||||
active=True,
|
||||
dupekey='trakt',
|
||||
mapped_only=True,
|
||||
icon='trakt16.png',
|
||||
|
|
|
@ -48,7 +48,7 @@ from sickbeard.common import SNATCHED, UNAIRED, IGNORED, ARCHIVED, WANTED, FAILE
|
|||
from sickbeard.common import SD, HD720p, HD1080p, UHD2160p
|
||||
from sickbeard.exceptions import ex
|
||||
from sickbeard.helpers import has_image_ext, remove_article, starify
|
||||
from sickbeard.indexers.indexer_config import INDEXER_TVDB, INDEXER_TVRAGE
|
||||
from sickbeard.indexers.indexer_config import INDEXER_TVDB, INDEXER_TVRAGE, INDEXER_TRAKT
|
||||
from sickbeard.scene_numbering import get_scene_numbering, set_scene_numbering, get_scene_numbering_for_show, \
|
||||
get_xem_numbering_for_show, get_scene_absolute_numbering_for_show, get_xem_absolute_numbering_for_show, \
|
||||
get_scene_absolute_numbering
|
||||
|
@ -67,10 +67,12 @@ from unidecode import unidecode
|
|||
|
||||
from lib.libtrakt import TraktAPI
|
||||
from lib.libtrakt.exceptions import TraktException, TraktAuthException
|
||||
from lib.libtrakt.indexerapiinterface import TraktSearchTypes
|
||||
from trakt_helpers import build_config, trakt_collection_remove_account
|
||||
from sickbeard.bs4_parser import BS4Parser
|
||||
from lib.tmdb_api import TMDB
|
||||
from lib.tvdb_api.tvdb_exceptions import tvdb_exception
|
||||
from lib.fuzzywuzzy import fuzz
|
||||
|
||||
try:
|
||||
import json
|
||||
|
@ -2573,22 +2575,31 @@ class NewHomeAddShows(Home):
|
|||
results = {}
|
||||
final_results = []
|
||||
|
||||
search_id, indexer_id = '', None
|
||||
search_id = ''
|
||||
search_id, indexer_id, trakt_id, tmdb_id, INDEXER_TVDB_X = '', None, None, None, INDEXER_TRAKT
|
||||
try:
|
||||
search_id = re.search(r'(?m)((?:tt\d{4,})|^\d{4,}$)', search_term).group(1)
|
||||
resp = [r for r in self.getTrakt('/search/%s/%s?type=show&extended=full' % (
|
||||
('tvdb', 'imdb')['tt' in search_id], search_id)) if 'show' == r['type']][0]
|
||||
search_term = resp['show']['title']
|
||||
indexer_id = resp['show']['ids']['tvdb']
|
||||
except:
|
||||
|
||||
lINDEXER_API_PARMS = sickbeard.indexerApi(INDEXER_TVDB_X).api_params.copy()
|
||||
lINDEXER_API_PARMS['language'] = lang
|
||||
lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsNoFilterListUI
|
||||
lINDEXER_API_PARMS['sleep_retry'] = 5
|
||||
lINDEXER_API_PARMS['search_type'] = (TraktSearchTypes.tvdb_id, TraktSearchTypes.imdb_id)['tt' in search_id]
|
||||
t = sickbeard.indexerApi(INDEXER_TVDB_X).indexer(**lINDEXER_API_PARMS)
|
||||
|
||||
resp = t[search_id][0]
|
||||
search_term = resp['seriesname']
|
||||
indexer_id = resp['ids']['tvdb']
|
||||
trakt_id = resp['ids'].get('trakt')
|
||||
tmdb_id = resp['ids'].get('tmdb')
|
||||
|
||||
except (StandardError, Exception):
|
||||
search_term = (search_term, '')['tt' in search_id]
|
||||
|
||||
# Query Indexers for each search term and build the list of results
|
||||
for indexer in sickbeard.indexerApi().indexers if not int(indexer) else [int(indexer)]:
|
||||
lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
|
||||
lINDEXER_API_PARMS['language'] = lang
|
||||
lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsListUI
|
||||
lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsNoFilterListUI
|
||||
t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
|
||||
|
||||
try:
|
||||
|
@ -2596,96 +2607,112 @@ class NewHomeAddShows(Home):
|
|||
if bool(indexer_id):
|
||||
logger.log('Fetching show using id: %s (%s) from tv datasource %s' % (
|
||||
search_id, search_term, sickbeard.indexerApi(indexer).name), logger.DEBUG)
|
||||
results.setdefault('tt' in search_id and 3 or indexer, []).extend(
|
||||
[{'id': indexer_id, 'seriesname': t[indexer_id, False]['seriesname'],
|
||||
'firstaired': t[indexer_id, False]['firstaired'], 'network': t[indexer_id, False]['network'],
|
||||
'overview': t[indexer_id, False]['overview'],
|
||||
'genres': '' if not t[indexer_id, False]['genre'] else
|
||||
t[indexer_id, False]['genre'].lower().strip('|').replace('|', ', '),
|
||||
}])
|
||||
r = t[indexer_id, False]
|
||||
results.setdefault('tt' in search_id and INDEXER_TVDB_X or indexer, {})[int(indexer_id)] = {
|
||||
'id': indexer_id, 'seriesname': r['seriesname'], 'firstaired': r['firstaired'],
|
||||
'network': r['network'], 'overview': r['overview'],
|
||||
'genres': '' if not r['genre'] else r['genre'].lower().strip('|').replace('|', ', '),
|
||||
'trakt_id': trakt_id, 'tmdb_id': tmdb_id
|
||||
}
|
||||
break
|
||||
else:
|
||||
logger.log('Searching for shows using search term: %s from tv datasource %s' % (
|
||||
search_term, sickbeard.indexerApi(indexer).name), logger.DEBUG)
|
||||
tvdb_ids = []
|
||||
results.setdefault(indexer, {})
|
||||
for term in terms:
|
||||
try:
|
||||
for r in t[term]:
|
||||
tvdb_id = int(r['id'])
|
||||
if tvdb_id not in tvdb_ids:
|
||||
tvdb_ids.append(tvdb_id)
|
||||
results.setdefault(indexer, []).extend([r.copy()])
|
||||
if tvdb_id not in results[indexer]:
|
||||
results.setdefault(indexer, {})[tvdb_id] = r.copy()
|
||||
elif r['seriesname'] != results[indexer][tvdb_id]['seriesname']:
|
||||
results[indexer][tvdb_id].setdefault('aliases', []).append(r['seriesname'])
|
||||
except tvdb_exception:
|
||||
pass
|
||||
except Exception as e:
|
||||
except (StandardError, Exception):
|
||||
pass
|
||||
|
||||
# Query trakt for tvdb ids
|
||||
try:
|
||||
logger.log('Searching for show using search term: %s from tv datasource Trakt' % search_term, logger.DEBUG)
|
||||
resp = []
|
||||
for term in terms:
|
||||
result = self.getTrakt('/search/show?query=%s&extended=full' % term)
|
||||
resp += result
|
||||
match = False
|
||||
for r in result:
|
||||
if term == r.get('show', {}).get('title', ''):
|
||||
match = True
|
||||
if match:
|
||||
# Query trakt for tvdb ids
|
||||
try:
|
||||
logger.log('Searching for show using search term: %s from tv datasource Trakt' % search_term, logger.DEBUG)
|
||||
resp = []
|
||||
lINDEXER_API_PARMS = sickbeard.indexerApi(INDEXER_TVDB_X).api_params.copy()
|
||||
lINDEXER_API_PARMS['language'] = lang
|
||||
lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsNoFilterListUI
|
||||
lINDEXER_API_PARMS['sleep_retry'] = 5
|
||||
lINDEXER_API_PARMS['search_type'] = TraktSearchTypes.text
|
||||
t = sickbeard.indexerApi(INDEXER_TVDB_X).indexer(**lINDEXER_API_PARMS)
|
||||
|
||||
for term in terms:
|
||||
result = t[term]
|
||||
resp += result
|
||||
match = False
|
||||
for r in result:
|
||||
if isinstance(r.get('seriesname'), (str, unicode)) \
|
||||
and term.lower() == r.get('seriesname', '').lower():
|
||||
match = True
|
||||
break
|
||||
tvdb_ids = []
|
||||
results_trakt = []
|
||||
for item in resp:
|
||||
show = item['show']
|
||||
if 'tvdb' in show['ids'] and show['ids']['tvdb'] and show['ids']['tvdb'] not in tvdb_ids:
|
||||
results_trakt.append({
|
||||
'id': show['ids']['tvdb'], 'seriesname': show['title'],
|
||||
'firstaired': (show['first_aired'] and re.sub(r'T.*$', '', str(show['first_aired'])) or show['year']),
|
||||
'network': show['network'], 'overview': show['overview'],
|
||||
'genres': ', '.join(['%s' % v.lower() for v in show.get('genres', {}) or []])})
|
||||
tvdb_ids.append(show['ids']['tvdb'])
|
||||
results.update({3: results_trakt})
|
||||
if INDEXER_TVDB in results:
|
||||
tvdb_filtered = []
|
||||
for tvdb_item in results[INDEXER_TVDB]:
|
||||
if int(tvdb_item['id']) not in tvdb_ids:
|
||||
tvdb_filtered.append(tvdb_item)
|
||||
if tvdb_filtered:
|
||||
results[INDEXER_TVDB] = tvdb_filtered
|
||||
else:
|
||||
del(results[INDEXER_TVDB])
|
||||
except:
|
||||
pass
|
||||
if match:
|
||||
break
|
||||
results_trakt = {}
|
||||
for item in resp:
|
||||
if 'tvdb' in item['ids'] and item['ids']['tvdb']:
|
||||
if item['ids']['tvdb'] not in results[INDEXER_TVDB]:
|
||||
results_trakt[int(item['ids']['tvdb'])] = {
|
||||
'id': item['ids']['tvdb'], 'seriesname': item['seriesname'],
|
||||
'genres': item['genres'].lower(), 'network': item['network'],
|
||||
'overview': item['overview'], 'firstaired': item['firstaired'],
|
||||
'trakt_id': item['ids']['trakt'], 'tmdb_id': item['ids']['tmdb']}
|
||||
elif item['seriesname'] != results[INDEXER_TVDB][int(item['ids']['tvdb'])]['seriesname']:
|
||||
results[INDEXER_TVDB][int(item['ids']['tvdb'])].setdefault(
|
||||
'aliases', []).append(item['seriesname'])
|
||||
results.setdefault(INDEXER_TVDB_X, {}).update(results_trakt)
|
||||
except (StandardError, Exception):
|
||||
pass
|
||||
|
||||
id_names = [None, sickbeard.indexerApi(INDEXER_TVDB).name, sickbeard.indexerApi(INDEXER_TVRAGE).name,
|
||||
'%s via Trakt' % sickbeard.indexerApi(INDEXER_TVDB).name]
|
||||
id_names = {iid: (name, '%s via %s' % (sickbeard.indexerApi(INDEXER_TVDB).name, name))[INDEXER_TVDB_X == iid]
|
||||
for iid, name in sickbeard.indexerApi().all_indexers.iteritems()}
|
||||
# noinspection PyUnboundLocalVariable
|
||||
map(final_results.extend,
|
||||
([['%s%s' % (id_names[id], helpers.findCertainShow(sickbeard.showList, int(show['id'])) and ' - <span class="exists-db">exists in db</span>' or ''),
|
||||
(id, INDEXER_TVDB)[id == 3], sickbeard.indexerApi((id, INDEXER_TVDB)[id == 3]).config['show_url'], int(show['id']),
|
||||
([['%s%s' % (id_names[id], helpers.find_show_by_id(sickbeard.showList, {(id, INDEXER_TVDB)[id == INDEXER_TVDB_X]: int(show['id'])}, no_mapped_ids=False) and ' - <span class="exists-db">exists in db</span>' or ''),
|
||||
(id, INDEXER_TVDB)[id == INDEXER_TVDB_X], sickbeard.indexerApi((id, INDEXER_TVDB)[id == INDEXER_TVDB_X]).config['show_url'], int(show['id']),
|
||||
show['seriesname'], self.encode_html(show['seriesname']), show['firstaired'],
|
||||
show.get('network', '') or '', show.get('genres', '') or '',
|
||||
re.sub(r'([,\.!][^,\.!]*?)$', '...',
|
||||
re.sub(r'([!\?\.])(?=\w)', r'\1 ',
|
||||
self.encode_html((show.get('overview', '') or '')[:250:].strip())))
|
||||
] for show in shows] for id, shows in results.items()))
|
||||
self.encode_html((show.get('overview', '') or '')[:250:].strip()))),
|
||||
self._get_UWRatio(term, show['seriesname'], show.get('aliases', [])), None, None,
|
||||
self._make_search_image_url(iid, show)
|
||||
] for show in shows.itervalues()] for iid, shows in results.iteritems()))
|
||||
|
||||
lang_id = sickbeard.indexerApi().config['langabbv_to_id'][lang]
|
||||
return json.dumps({
|
||||
'results': sorted(final_results, reverse=True, key=lambda x: dateutil.parser.parse(
|
||||
re.match('^(?:19|20)\d\d$', str(x[6])) and ('%s-12-31' % str(x[6])) or (x[6] and str(x[6])) or '1900')),
|
||||
'results': sorted(final_results, reverse=True, key=lambda x: x[10]),
|
||||
'langid': lang_id})
|
||||
# return json.dumps({
|
||||
# 'results': sorted(final_results, reverse=True, key=lambda x: dateutil.parser.parse(
|
||||
# re.match('^(?:19|20)\d\d$', str(x[6])) and ('%s-12-31' % str(x[6])) or (x[6] and str(x[6])) or '1900')),
|
||||
# 'langid': lang_id})
|
||||
|
||||
def getTrakt(self, url, *args, **kwargs):
|
||||
@staticmethod
|
||||
def _make_search_image_url(iid, show):
|
||||
img_url = ''
|
||||
if INDEXER_TRAKT == iid:
|
||||
img_url = 'imagecache?path=browse/thumb/trakt&filename=%s&tmdbid=%s&tvdbid=%s' % \
|
||||
('%s.jpg' % show['trakt_id'], show.get('tmdb_id'), show.get('id'))
|
||||
elif INDEXER_TVDB == iid:
|
||||
img_url = 'imagecache?path=browse/thumb/tvdb&filename=%s&tvdbid=%s' % \
|
||||
('%s.jpg' % show['id'], show['id'])
|
||||
return img_url
|
||||
|
||||
filtered = []
|
||||
try:
|
||||
resp = TraktAPI().trakt_request(url, sleep_retry=5)
|
||||
if len(resp):
|
||||
filtered = resp
|
||||
except TraktException as e:
|
||||
logger.log(u'Could not connect to Trakt service: %s' % ex(e), logger.WARNING)
|
||||
|
||||
return filtered
|
||||
def _get_UWRatio(self, search_term, showname, aliases):
|
||||
s = fuzz.UWRatio(search_term, showname)
|
||||
# check aliases and give them a little lower score
|
||||
for a in aliases:
|
||||
ns = fuzz.UWRatio(search_term, a) - 1
|
||||
if ns > s:
|
||||
s = ns
|
||||
return s
|
||||
|
||||
def massAddTable(self, rootDir=None, **kwargs):
|
||||
t = PageTemplate(headers=self.request.headers, file='home_massAddTable.tmpl')
|
||||
|
@ -2900,7 +2927,7 @@ class NewHomeAddShows(Home):
|
|||
newest = dt_string
|
||||
|
||||
img_uri = 'http://img7.anidb.net/pics/anime/%s' % image
|
||||
images = dict(poster=dict(thumb='imagecache?path=anidb&source=%s' % img_uri))
|
||||
images = dict(poster=dict(thumb='imagecache?path=browse/thumb/anidb&source=%s' % img_uri))
|
||||
sickbeard.CACHE_IMAGE_URL_LIST.add_url(img_uri)
|
||||
|
||||
votes = rating = 0
|
||||
|
@ -3050,7 +3077,7 @@ class NewHomeAddShows(Home):
|
|||
dims = [row.get('poster', {}).get('width', 0), row.get('poster', {}).get('height', 0)]
|
||||
s = [scale(x, int(max(dims))) for x in dims]
|
||||
img_uri = re.sub('(?im)(.*V1_?)(\..*?)$', r'\1UX%s_CR0,0,%s,%s_AL_\2' % (s[0], s[0], s[1]), img_uri)
|
||||
images = dict(poster=dict(thumb='imagecache?path=imdb&source=%s' % img_uri))
|
||||
images = dict(poster=dict(thumb='imagecache?path=browse/thumb/imdb&source=%s' % img_uri))
|
||||
sickbeard.CACHE_IMAGE_URL_LIST.add_url(img_uri)
|
||||
|
||||
filtered.append(dict(
|
||||
|
@ -3133,7 +3160,7 @@ class NewHomeAddShows(Home):
|
|||
match.group(12)]
|
||||
img_uri = img_uri.replace(match.group(), ''.join(
|
||||
[str(y) for x in map(None, parts, scaled) for y in x if y is not None]))
|
||||
images = dict(poster=dict(thumb='imagecache?path=imdb&source=%s' % img_uri))
|
||||
images = dict(poster=dict(thumb='imagecache?path=browse/thumb/imdb&source=%s' % img_uri))
|
||||
sickbeard.CACHE_IMAGE_URL_LIST.add_url(img_uri)
|
||||
|
||||
filtered.append(dict(
|
||||
|
@ -3401,7 +3428,7 @@ class NewHomeAddShows(Home):
|
|||
tmdbid = item.get('show', {}).get('ids', {}).get('tmdb', 0)
|
||||
tvdbid = item.get('show', {}).get('ids', {}).get('tvdb', 0)
|
||||
traktid = item.get('show', {}).get('ids', {}).get('trakt', 0)
|
||||
images = dict(poster=dict(thumb='imagecache?path=trakt/poster/thumb&filename=%s&tmdbid=%s&tvdbid=%s' %
|
||||
images = dict(poster=dict(thumb='imagecache?path=browse/thumb/trakt&filename=%s&tmdbid=%s&tvdbid=%s' %
|
||||
('%s.jpg' % traktid, tmdbid, tvdbid)))
|
||||
|
||||
filtered.append(dict(
|
||||
|
@ -6107,26 +6134,33 @@ class CachedImages(MainHandler):
|
|||
tmdbimage = False
|
||||
if source is not None and source in sickbeard.CACHE_IMAGE_URL_LIST:
|
||||
s = source
|
||||
if source is None and tmdbid not in [None, 0, '0'] and self.should_try_image(static_image_path, 'tmdb'):
|
||||
if source is None and tmdbid not in [None, 'None', 0, '0'] \
|
||||
and self.should_try_image(static_image_path, 'tmdb'):
|
||||
tmdbimage = True
|
||||
try:
|
||||
tmdbapi = TMDB(sickbeard.TMDB_API_KEY)
|
||||
tmdbconfig = tmdbapi.Configuration().info()
|
||||
images = tmdbapi.TV(helpers.tryInt(tmdbid)).images()
|
||||
s = '%s%s%s' % (tmdbconfig['images']['base_url'], tmdbconfig['images']['poster_sizes'][3], sorted(images['posters'], key=lambda x: x['vote_average'], reverse=True)[0]['file_path']) if len(images['posters']) > 0 else ''
|
||||
except:
|
||||
s = '%s%s%s' % (tmdbconfig['images']['base_url'], tmdbconfig['images']['poster_sizes'][3],
|
||||
sorted(images['posters'], key=lambda x: x['vote_average'],
|
||||
reverse=True)[0]['file_path']) if len(images['posters']) > 0 else ''
|
||||
except (StandardError, Exception):
|
||||
s = ''
|
||||
if s and not helpers.download_file(s, static_image_path) and s.find('trakt.us'):
|
||||
helpers.download_file(s.replace('trakt.us', 'trakt.tv'), static_image_path)
|
||||
if tmdbimage and not ek.ek(os.path.isfile, static_image_path):
|
||||
self.create_dummy_image(static_image_path, 'tmdb')
|
||||
|
||||
if source is None and tvdbid not in [None, 0, '0'] and not ek.ek(os.path.isfile, static_image_path) and self.should_try_image(static_image_path, 'tvdb'):
|
||||
if source is None and tvdbid not in [None, 'None', 0, '0'] \
|
||||
and not ek.ek(os.path.isfile, static_image_path) \
|
||||
and self.should_try_image(static_image_path, 'tvdb'):
|
||||
try:
|
||||
r = sickbeard.indexerApi(INDEXER_TVDB).indexer()[helpers.tryInt(tvdbid), False]
|
||||
lINDEXER_API_PARMS = sickbeard.indexerApi(INDEXER_TVDB).api_params.copy()
|
||||
lINDEXER_API_PARMS['posters'] = True
|
||||
r = sickbeard.indexerApi(INDEXER_TVDB).indexer(**lINDEXER_API_PARMS)[helpers.tryInt(tvdbid), False]
|
||||
if hasattr(r, 'data') and 'poster' in r.data:
|
||||
s = r.data['poster']
|
||||
except:
|
||||
except (StandardError, Exception):
|
||||
s = ''
|
||||
if s:
|
||||
helpers.download_file(s, static_image_path)
|
||||
|
@ -6137,7 +6171,12 @@ class CachedImages(MainHandler):
|
|||
self.delete_all_dummy_images(static_image_path)
|
||||
|
||||
if not ek.ek(os.path.isfile, static_image_path):
|
||||
self.redirect('images/trans.png')
|
||||
static_image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'gui', 'slick', 'images', 'trans.png')
|
||||
else:
|
||||
helpers.set_file_timestamp(static_image_path, min_age=3, new_time=None)
|
||||
self.redirect('cache/images/%s/%s' % (path, file_name))
|
||||
|
||||
mime_type, encoding = MimeTypes().guess_type(static_image_path)
|
||||
self.set_header('Content-Type', mime_type)
|
||||
with open(static_image_path, 'rb') as img:
|
||||
return img.read()
|
||||
|
||||
|
|
Loading…
Reference in a new issue