diff --git a/CHANGES.md b/CHANGES.md
index f3106eef..12b95882 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -89,6 +89,16 @@
* Add new parameter 'poster' to indexer api
* Add optional tvdb_api load season image: lINDEXER_API_PARMS['seasons'] = True
* Add optional tvdb_api load season wide image: lINDEXER_API_PARMS['seasonwides'] = True
+* Add Fuzzywuzzy 0.15.1 to sort search results
+* Change remove search results filtering from tv info source
+* Change suppress startup warnings for Fuzzywuzzy and Cheetah libs
+* Change show search, add options to choose order of search results
+* Add option to sort search results by 'A to Z' or 'First aired'
+* Add option to sort search results by 'Relevancy' using Fuzzywuzzy lib
+* Change search result anchor text uses SORT_ARTICLE setting for display
+* Change existing shows in DB are no longer selectable in result list
+* Change add image to search result item hover over
+* Change improve image load speed on browse Trakt/IMDb/AniDB pages
[develop changelog]
diff --git a/SickBeard.py b/SickBeard.py
index 1be1610e..8248be6b 100755
--- a/SickBeard.py
+++ b/SickBeard.py
@@ -32,6 +32,10 @@ import shutil
import subprocess
import time
import threading
+import warnings
+
+warnings.filterwarnings('ignore', module=r'.*fuzzywuzzy.*')
+warnings.filterwarnings('ignore', module=r'.*Cheetah.*')
if not (2, 7, 9) <= sys.version_info < (3, 0):
print('Python %s.%s.%s detected.' % sys.version_info[:3])
diff --git a/gui/slick/css/dark.css b/gui/slick/css/dark.css
index 58487684..89657f86 100644
--- a/gui/slick/css/dark.css
+++ b/gui/slick/css/dark.css
@@ -1289,6 +1289,7 @@ input sizing (for config pages)
========================================================================== */
.showlist-select optgroup,
+#results-sortby optgroup,
#pickShow optgroup,
#showfilter optgroup,
#showsort optgroup,
@@ -1298,6 +1299,7 @@ input sizing (for config pages)
}
.showlist-select optgroup option,
+#results-sortby optgroup option,
#pickShow optgroup option,
#showfilter optgroup option,
#showsort optgroup option,
diff --git a/gui/slick/css/light.css b/gui/slick/css/light.css
index d031ef28..c057dd3c 100644
--- a/gui/slick/css/light.css
+++ b/gui/slick/css/light.css
@@ -1254,6 +1254,7 @@ input sizing (for config pages)
========================================================================== */
.showlist-select optgroup,
+#results-sortby optgroup,
#pickShow optgroup,
#showfilter optgroup,
#showsort optgroup,
@@ -1263,6 +1264,7 @@ input sizing (for config pages)
}
.showlist-select optgroup option,
+#results-sortby optgroup option,
#pickShow optgroup option,
#showfilter optgroup option,
#showsort optgroup option,
diff --git a/gui/slick/css/style.css b/gui/slick/css/style.css
index 68fbfda4..8eca368a 100644
--- a/gui/slick/css/style.css
+++ b/gui/slick/css/style.css
@@ -1113,11 +1113,16 @@ div.formpaginate{
margin-left:10px
}
-.stepDiv #searchResults div{
+.stepDiv #searchResults .results-item{
+ width:100%;
line-height:1.7
}
-.stepDiv #searchResults div .exists-db{
+.stepDiv #searchResults .results-item input[disabled=disabled]{
+ visibility:hidden
+}
+
+.stepDiv #searchResults .results-item .exists-db{
font-weight:800;
font-style:italic
}
@@ -1126,6 +1131,11 @@ div.formpaginate{
margin-right:6px
}
+a span.article,
+a:hover span.article{
+ color:#2f4799
+}
+
.stepone-result-title{
font-weight:600;
margin-left:10px
@@ -2785,6 +2795,7 @@ config*.tmpl
padding-top:10px
}
+select .selected-text,
select .selected{
font-weight:700;
color:#888
diff --git a/gui/slick/images/image-light.png b/gui/slick/images/image-light.png
new file mode 100644
index 00000000..59e658b1
Binary files /dev/null and b/gui/slick/images/image-light.png differ
diff --git a/gui/slick/interfaces/default/home_newShow.tmpl b/gui/slick/interfaces/default/home_newShow.tmpl
index 9fa70d38..bbb23756 100644
--- a/gui/slick/interfaces/default/home_newShow.tmpl
+++ b/gui/slick/interfaces/default/home_newShow.tmpl
@@ -15,7 +15,11 @@
#set indexer_count = len([$i for $i in $sickbeard.indexerApi().indexers if $sickbeard.indexerApi(i).config.get('active', False) and not $sickbeard.indexerApi(i).config.get('defunct', False)]) + 1
diff --git a/gui/slick/js/newShow.js b/gui/slick/js/newShow.js
index f7dc5ead..0a5da2c8 100644
--- a/gui/slick/js/newShow.js
+++ b/gui/slick/js/newShow.js
@@ -1,3 +1,5 @@
+/** @namespace config.sortArticle */
+/** @namespace config.resultsSortby */
$(document).ready(function () {
function populateLangSelect() {
@@ -71,59 +73,146 @@ $(document).ready(function () {
$('#searchResults').empty().html('search timed out, try again or try another database');
},
success: function (data) {
- var resultStr = '', checked = '', rowType, row = 0;
+ var resultStr = '', attrs = '', checked = !1, rowType, row = 0, srcState = '';
if (0 === data.results.length) {
resultStr += 'Sorry, no results found. Try a different search.';
} else {
- var idxSrcDB = 0, idxSrcDBId = 1, idxSrcUrl = 2, idxShowID = 3, idxTitle = 4, idxTitleHtml = 5,
- idxDate = 6, idxNetwork = 7, idxGenres = 8, idxOverview = 9;
- $.each(data.results, function (index, obj) {
- checked = (0 == row ? ' checked' : '');
- rowType = (0 == row % 2 ? '' : ' class="alt"');
+ var result = {
+ SrcName: 0, isInDB: 1, SrcId: 2, SrcDBId: 3, SrcUrl: 4, ShowID: 5, Title: 6, TitleHtml: 7,
+ Aired: 8, Network: 9, Genre: 10, Overview: 11, RelSort: 12, DateSort: 13, AzSort: 14, ImgUrl: 15
+ };
+ $.each(data.results, function (index, item) {
+ attrs = (!0 === item[result.isInDB] ? ' disabled="disabled"' : (!0 === checked ? '' : ' checked'));
+ checked = (' checked' === attrs) ? !0 : checked;
+ rowType = (0 == row % 2 ? '' : ' alt');
row++;
- var display_show_name = cleanseText(obj[idxTitle], !0), showstartdate = '';
+ var displayShowName = cleanseText(item[result.Title], !0), showstartdate = '';
- if (null !== obj[idxDate]) {
- var startDate = new Date(obj[idxDate]);
+ if (null !== item[result.Aired]) {
+ var startDate = new Date(item[result.Aired]);
var today = new Date();
showstartdate = ' ('
+ (startDate > today ? 'will debut' : 'started')
- + ': ' + obj[idxDate] + ')';
+ + ': ' + item[result.Aired] + ')';
}
- resultStr += '
'
+
+ srcState = [
+ null === item[result.SrcName] ? '' : item[result.SrcName],
+ !1 === item[result.isInDB] ? '' : '
exists in db']
+ .join(' - ').replace(/(^[\s-]+|[\s-]+$)/, '');
+ resultStr += '
'
+ + (0 < item[result.Genre].length ? '
(' + item[result.Genre] + ')
' : '')
+ + (0 < item[result.Network].length ? '
' + item[result.Network] + '
' : '')
+ + '
'
+ + (0 < item[result.Overview].length ? '
' + item[result.Overview] + '
' : '')
+ + '
Click for more'
+ '"'
- + ' href="' + anonURL + obj[idxSrcUrl] + obj[idxShowID] + ((data.langid && '' != data.langid) ? '&lid=' + data.langid : '') + '"'
+ + ' href="' + anonURL + item[result.SrcUrl] + item[result.ShowID] + ((data.langid && '' != data.langid) ? '&lid=' + data.langid : '') + '"'
+ ' onclick="window.open(this.href, \'_blank\'); return false;"'
- + '>' + display_show_name + ''
+ + '>' + (config.sortArticle ? displayShowName : displayShowName.replace(/^((?:A(?!\s+to)n?)|The)(\s)+(.*)/i, '$3$2
($1)')) + ''
+ showstartdate
- + (null == obj[idxSrcDB] ? ''
- : '
' + '[' + obj[idxSrcDB] + ']' + '')
+ + ('' === srcState ? ''
+ : '
' + '[' + srcState + ']' + '')
+ '
' + "\n";
+
});
}
+ var selAttr = 'selected="selected" ',
+ selClass = 'selected-text',
+ classAttrSel = 'class="' + selClass + '" ',
+ defSortby = /^az/.test(config.resultsSortby) || /^date/.test(config.resultsSortby) ? '': classAttrSel + selAttr;
+
$('#searchResults').html(
''
);
+
+ var container$ = $('#holder'),
+ sortbySelect$ = $('#results-sortby'),
+ reOrder = (function(value){
+ return ($('#results-sortby').find('option[value$="notop"]').hasClass(selClass)
+ ? (1000 > value ? value + 1000 : value)
+ : (1000 > value ? value : value - 1000))}),
+ getData = (function(itemElem, sortby){
+ var position = parseInt($(itemElem).attr('data-sort-' + sortby));
+ return (!$(itemElem).attr('data-indb')) ? position : reOrder(position);
+ });
+
+ sortbySelect$.find('.' + selClass).each(function(){
+ $(this).html('> ' + $(this).html());
+ });
+
+ container$.isotope({
+ itemSelector: '.results-item',
+ sortBy: sortbySelect$.find('option:not([value$="top"]).' + selClass).val(),
+ layoutMode: 'masonry',
+ getSortData: {
+ az: function(itemElem){ return getData(itemElem, 'az'); },
+ date: function(itemElem){ return getData(itemElem, 'date'); },
+ rel: function(itemElem){ return getData(itemElem, 'rel'); }
+ }
+ }).on('arrangeComplete', function(event, items){
+ $(items).each(function(i, item){
+ if (1 === i % 2){
+ $(item.element).addClass('alt');
+ }
+ });
+ });
+
+ sortbySelect$.on('change', function(){
+ var selectedSort = String($(this).val()), sortby = selectedSort, curSortby$, curSel$, newSel$;
+
+ curSortby$ = $(this).find('option:not([value$="top"])');
+ if (/top$/.test(selectedSort)){
+ sortby = curSortby$.filter('.' + selClass).val();
+ curSortby$ = $(this).find('option[value$="top"]');
+ }
+ curSel$ = curSortby$.filter('.' + selClass);
+ curSel$.html(curSel$.html().replace(/(?:>|>)\s/ , '')).removeClass(selClass);
+
+ newSel$ = $(this).find('option[value$="' + selectedSort + '"]');
+ newSel$.html('> ' + newSel$.html()).addClass(selClass);
+
+ $('.results-item[data-indb="1"]').each(function(){
+ $(this).attr(sortby, reOrder(parseInt($(this).attr(sortby), 10)));
+ });
+ $('.results-item').removeClass('alt');
+ container$.isotope('updateSortData').isotope({sortBy: sortby});
+
+ config.resultsSortby = sortby + ($(this).find('option[value$="notop"]').hasClass(selClass) ? ' notop' : '');
+ $.get(sbRoot + '/config/general/saveResultPrefs', {ui_results_sortby: selectedSort});
+ });
+
updateSampleText();
myform.loadsection(0);
$('.stepone-result-radio, .stepone-result-title').each(addQTip);
diff --git a/lib/fuzzywuzzy/StringMatcher.py b/lib/fuzzywuzzy/StringMatcher.py
new file mode 100644
index 00000000..d35e075f
--- /dev/null
+++ b/lib/fuzzywuzzy/StringMatcher.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# encoding: utf-8
+"""
+StringMatcher.py
+
+ported from python-Levenshtein
+[https://github.com/miohtama/python-Levenshtein]
+License available here: https://github.com/miohtama/python-Levenshtein/blob/master/COPYING
+"""
+
+from Levenshtein import *
+from warnings import warn
+
+
+class StringMatcher:
+ """A SequenceMatcher-like class built on the top of Levenshtein"""
+
+ def _reset_cache(self):
+ self._ratio = self._distance = None
+ self._opcodes = self._editops = self._matching_blocks = None
+
+ def __init__(self, isjunk=None, seq1='', seq2=''):
+ if isjunk:
+ warn("isjunk not NOT implemented, it will be ignored")
+ self._str1, self._str2 = seq1, seq2
+ self._reset_cache()
+
+ def set_seqs(self, seq1, seq2):
+ self._str1, self._str2 = seq1, seq2
+ self._reset_cache()
+
+ def set_seq1(self, seq1):
+ self._str1 = seq1
+ self._reset_cache()
+
+ def set_seq2(self, seq2):
+ self._str2 = seq2
+ self._reset_cache()
+
+ def get_opcodes(self):
+ if not self._opcodes:
+ if self._editops:
+ self._opcodes = opcodes(self._editops, self._str1, self._str2)
+ else:
+ self._opcodes = opcodes(self._str1, self._str2)
+ return self._opcodes
+
+ def get_editops(self):
+ if not self._editops:
+ if self._opcodes:
+ self._editops = editops(self._opcodes, self._str1, self._str2)
+ else:
+ self._editops = editops(self._str1, self._str2)
+ return self._editops
+
+ def get_matching_blocks(self):
+ if not self._matching_blocks:
+ self._matching_blocks = matching_blocks(self.get_opcodes(),
+ self._str1, self._str2)
+ return self._matching_blocks
+
+ def ratio(self):
+ if not self._ratio:
+ self._ratio = ratio(self._str1, self._str2)
+ return self._ratio
+
+ def quick_ratio(self):
+ # This is usually quick enough :o)
+ if not self._ratio:
+ self._ratio = ratio(self._str1, self._str2)
+ return self._ratio
+
+ def real_quick_ratio(self):
+ len1, len2 = len(self._str1), len(self._str2)
+ return 2.0 * min(len1, len2) / (len1 + len2)
+
+ def distance(self):
+ if not self._distance:
+ self._distance = distance(self._str1, self._str2)
+ return self._distance
diff --git a/lib/fuzzywuzzy/__init__.py b/lib/fuzzywuzzy/__init__.py
new file mode 100644
index 00000000..5b18ec2d
--- /dev/null
+++ b/lib/fuzzywuzzy/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+__version__ = '0.15.1'
diff --git a/lib/fuzzywuzzy/fuzz.py b/lib/fuzzywuzzy/fuzz.py
new file mode 100644
index 00000000..8dff500c
--- /dev/null
+++ b/lib/fuzzywuzzy/fuzz.py
@@ -0,0 +1,325 @@
+#!/usr/bin/env python
+# encoding: utf-8
+"""
+fuzz.py
+
+Copyright (c) 2011 Adam Cohen
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import unicode_literals
+import platform
+import warnings
+
+try:
+ from .StringMatcher import StringMatcher as SequenceMatcher
+except ImportError:
+ if platform.python_implementation() != "PyPy":
+ warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')
+ from difflib import SequenceMatcher
+
+from . import utils
+
+
+###########################
+# Basic Scoring Functions #
+###########################
+
+@utils.check_for_none
+@utils.check_empty_string
+def ratio(s1, s2):
+ s1, s2 = utils.make_type_consistent(s1, s2)
+
+ m = SequenceMatcher(None, s1, s2)
+ return utils.intr(100 * m.ratio())
+
+
+@utils.check_for_none
+@utils.check_empty_string
+def partial_ratio(s1, s2):
+    """Return the ratio of the most similar substring
+ as a number between 0 and 100."""
+ s1, s2 = utils.make_type_consistent(s1, s2)
+
+ if len(s1) <= len(s2):
+ shorter = s1
+ longer = s2
+ else:
+ shorter = s2
+ longer = s1
+
+ m = SequenceMatcher(None, shorter, longer)
+ blocks = m.get_matching_blocks()
+
+ # each block represents a sequence of matching characters in a string
+ # of the form (idx_1, idx_2, len)
+ # the best partial match will block align with at least one of those blocks
+ # e.g. shorter = "abcd", longer = XXXbcdeEEE
+ # block = (1,3,3)
+ # best score === ratio("abcd", "Xbcd")
+ scores = []
+ for block in blocks:
+ long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0
+ long_end = long_start + len(shorter)
+ long_substr = longer[long_start:long_end]
+
+ m2 = SequenceMatcher(None, shorter, long_substr)
+ r = m2.ratio()
+ if r > .995:
+ return 100
+ else:
+ scores.append(r)
+
+ return utils.intr(100 * max(scores))
+
+
+##############################
+# Advanced Scoring Functions #
+##############################
+
+def _process_and_sort(s, force_ascii, full_process=True):
+ """Return a cleaned string with token sorted."""
+ # pull tokens
+ ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s
+ tokens = ts.split()
+
+ # sort tokens and join
+ sorted_string = u" ".join(sorted(tokens))
+ return sorted_string.strip()
+
+
+# Sorted Token
+# find all alphanumeric tokens in the string
+# sort those tokens and take ratio of resulting joined strings
+# controls for unordered string elements
+@utils.check_for_none
+def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True):
+ sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process)
+ sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process)
+
+ if partial:
+ return partial_ratio(sorted1, sorted2)
+ else:
+ return ratio(sorted1, sorted2)
+
+
+def token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
+ """Return a measure of the sequences' similarity between 0 and 100
+ but sorting the token before comparing.
+ """
+ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
+
+
+def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
+ """Return the ratio of the most similar substring as a number between
+ 0 and 100 but sorting the token before comparing.
+ """
+ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
+
+
+@utils.check_for_none
+def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True):
+ """Find all alphanumeric tokens in each string...
+ - treat them as a set
+ - construct two strings of the form:
+
+ - take ratios of those two strings
+ - controls for unordered partial matches"""
+
+ p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1
+ p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2
+
+ if not utils.validate_string(p1):
+ return 0
+ if not utils.validate_string(p2):
+ return 0
+
+ # pull tokens
+ tokens1 = set(p1.split())
+ tokens2 = set(p2.split())
+
+ intersection = tokens1.intersection(tokens2)
+ diff1to2 = tokens1.difference(tokens2)
+ diff2to1 = tokens2.difference(tokens1)
+
+ sorted_sect = " ".join(sorted(intersection))
+ sorted_1to2 = " ".join(sorted(diff1to2))
+ sorted_2to1 = " ".join(sorted(diff2to1))
+
+ combined_1to2 = sorted_sect + " " + sorted_1to2
+ combined_2to1 = sorted_sect + " " + sorted_2to1
+
+ # strip
+ sorted_sect = sorted_sect.strip()
+ combined_1to2 = combined_1to2.strip()
+ combined_2to1 = combined_2to1.strip()
+
+ if partial:
+ ratio_func = partial_ratio
+ else:
+ ratio_func = ratio
+
+ pairwise = [
+ ratio_func(sorted_sect, combined_1to2),
+ ratio_func(sorted_sect, combined_2to1),
+ ratio_func(combined_1to2, combined_2to1)
+ ]
+ return max(pairwise)
+
+
+def token_set_ratio(s1, s2, force_ascii=True, full_process=True):
+ return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
+
+
+def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True):
+ return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
+
+
+###################
+# Combination API #
+###################
+
+# q is for quick
+def QRatio(s1, s2, force_ascii=True, full_process=True):
+ """
+ Quick ratio comparison between two strings.
+
+ Runs full_process from utils on both strings
+ Short circuits if either of the strings is empty after processing.
+
+ :param s1:
+ :param s2:
+ :param force_ascii: Allow only ASCII characters (Default: True)
+ :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
+ :return: similarity ratio
+ """
+
+ if full_process:
+ p1 = utils.full_process(s1, force_ascii=force_ascii)
+ p2 = utils.full_process(s2, force_ascii=force_ascii)
+ else:
+ p1 = s1
+ p2 = s2
+
+ if not utils.validate_string(p1):
+ return 0
+ if not utils.validate_string(p2):
+ return 0
+
+ return ratio(p1, p2)
+
+
+def UQRatio(s1, s2, full_process=True):
+ """
+ Unicode quick ratio
+
+ Calls QRatio with force_ascii set to False
+
+ :param s1:
+ :param s2:
+ :return: similarity ratio
+ """
+ return QRatio(s1, s2, force_ascii=False, full_process=full_process)
+
+
+# w is for weighted
+def WRatio(s1, s2, force_ascii=True, full_process=True):
+ """
+ Return a measure of the sequences' similarity between 0 and 100, using different algorithms.
+
+ **Steps in the order they occur**
+
+ #. Run full_process from utils on both strings
+ #. Short circuit if this makes either string empty
+ #. Take the ratio of the two processed strings (fuzz.ratio)
+ #. Run checks to compare the length of the strings
+ * If one of the strings is more than 1.5 times as long as the other
+ use partial_ratio comparisons - scale partial results by 0.9
+ (this makes sure only full results can return 100)
+ * If one of the strings is over 8 times as long as the other
+ instead scale by 0.6
+
+ #. Run the other ratio functions
+ * if using partial ratio functions call partial_ratio,
+ partial_token_sort_ratio and partial_token_set_ratio
+ scale all of these by the ratio based on length
+ * otherwise call token_sort_ratio and token_set_ratio
+ * all token based comparisons are scaled by 0.95
+ (on top of any partial scalars)
+
+ #. Take the highest value from these results
+ round it and return it as an integer.
+
+ :param s1:
+ :param s2:
+ :param force_ascii: Allow only ascii characters
+ :type force_ascii: bool
+ :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
+ :return:
+ """
+
+ if full_process:
+ p1 = utils.full_process(s1, force_ascii=force_ascii)
+ p2 = utils.full_process(s2, force_ascii=force_ascii)
+ else:
+ p1 = s1
+ p2 = s2
+
+ if not utils.validate_string(p1):
+ return 0
+ if not utils.validate_string(p2):
+ return 0
+
+ # should we look at partials?
+ try_partial = True
+ unbase_scale = .95
+ partial_scale = .90
+
+ base = ratio(p1, p2)
+ len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))
+
+ # if strings are similar length, don't use partials
+ if len_ratio < 1.5:
+ try_partial = False
+
+ # if one string is much much shorter than the other
+ if len_ratio > 8:
+ partial_scale = .6
+
+ if try_partial:
+ partial = partial_ratio(p1, p2) * partial_scale
+ ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \
+ * unbase_scale * partial_scale
+ ptser = partial_token_set_ratio(p1, p2, full_process=False) \
+ * unbase_scale * partial_scale
+
+ return utils.intr(max(base, partial, ptsor, ptser))
+ else:
+ tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale
+ tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale
+
+ return utils.intr(max(base, tsor, tser))
+
+
+def UWRatio(s1, s2, full_process=True):
+ """Return a measure of the sequences' similarity between 0 and 100,
+ using different algorithms. Same as WRatio but preserving unicode.
+ """
+ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
diff --git a/lib/fuzzywuzzy/process.py b/lib/fuzzywuzzy/process.py
new file mode 100644
index 00000000..61b38f1b
--- /dev/null
+++ b/lib/fuzzywuzzy/process.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python
+# encoding: utf-8
+"""
+process.py
+
+Copyright (c) 2011 Adam Cohen
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""
+
+from . import fuzz
+from . import utils
+import heapq
+import logging
+from functools import partial
+
+
+default_scorer = fuzz.WRatio
+
+
+default_processor = utils.full_process
+
+
+def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
+ """Select the best match in a list or dictionary of choices.
+
+ Find best matches in a list or dictionary of choices, return a
+ generator of tuples containing the match and its score. If a dictionary
+ is used, also returns the key for each match.
+
+ Arguments:
+ query: An object representing the thing we want to find.
+ choices: An iterable or dictionary-like object containing choices
+ to be matched against the query. Dictionary arguments of
+ {key: value} pairs will attempt to match the query against
+ each value.
+ processor: Optional function of the form f(a) -> b, where a is the query or
+ individual choice and b is the choice to be used in matching.
+
+ This can be used to match against, say, the first element of
+ a list:
+
+ lambda x: x[0]
+
+ Defaults to fuzzywuzzy.utils.full_process().
+ scorer: Optional function for scoring matches between the query and
+ an individual processed choice. This should be a function
+ of the form f(query, choice) -> int.
+
+ By default, fuzz.WRatio() is used and expects both query and
+ choice to be strings.
+ score_cutoff: Optional argument for score threshold. No matches with
+ a score less than this number will be returned. Defaults to 0.
+
+ Returns:
+ Generator of tuples containing the match and its score.
+
+ If a list is used for choices, then the result will be 2-tuples.
+ If a dictionary is used, then the result will be 3-tuples containing
+ the key for each match.
+
+ For example, searching for 'bird' in the dictionary
+
+ {'bard': 'train', 'dog': 'man'}
+
+ may return
+
+ ('train', 22, 'bard'), ('man', 0, 'dog')
+ """
+ # Catch generators without lengths
+ def no_process(x):
+ return x
+
+ try:
+ if choices is None or len(choices) == 0:
+ raise StopIteration
+ except TypeError:
+ pass
+
+ # If the processor was removed by setting it to None
+    # perform a noop as it still needs to be a function
+ if processor is None:
+ processor = no_process
+
+ # Run the processor on the input query.
+ processed_query = processor(query)
+
+ if len(processed_query) == 0:
+ logging.warning(u"Applied processor reduces input query to empty string, "
+ "all comparisons will have score 0. "
+ "[Query: \'{0}\']".format(query))
+
+ # Don't run full_process twice
+ if scorer in [fuzz.WRatio, fuzz.QRatio,
+ fuzz.token_set_ratio, fuzz.token_sort_ratio,
+ fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
+ fuzz.UWRatio, fuzz.UQRatio] \
+ and processor == utils.full_process:
+ processor = no_process
+
+ # Only process the query once instead of for every choice
+ if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
+ pre_processor = partial(utils.full_process, force_ascii=False)
+ scorer = partial(scorer, full_process=False)
+ elif scorer in [fuzz.WRatio, fuzz.QRatio,
+ fuzz.token_set_ratio, fuzz.token_sort_ratio,
+ fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
+ pre_processor = partial(utils.full_process, force_ascii=True)
+ scorer = partial(scorer, full_process=False)
+ else:
+ pre_processor = no_process
+ processed_query = pre_processor(processed_query)
+
+ try:
+ # See if choices is a dictionary-like object.
+ for key, choice in choices.items():
+ processed = pre_processor(processor(choice))
+ score = scorer(processed_query, processed)
+ if score >= score_cutoff:
+ yield (choice, score, key)
+ except AttributeError:
+ # It's a list; just iterate over it.
+ for choice in choices:
+ processed = pre_processor(processor(choice))
+ score = scorer(processed_query, processed)
+ if score >= score_cutoff:
+ yield (choice, score)
+
+
+def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5):
+ """Select the best match in a list or dictionary of choices.
+
+ Find best matches in a list or dictionary of choices, return a
+ list of tuples containing the match and its score. If a dictionary
+ is used, also returns the key for each match.
+
+ Arguments:
+ query: An object representing the thing we want to find.
+ choices: An iterable or dictionary-like object containing choices
+ to be matched against the query. Dictionary arguments of
+ {key: value} pairs will attempt to match the query against
+ each value.
+ processor: Optional function of the form f(a) -> b, where a is the query or
+ individual choice and b is the choice to be used in matching.
+
+ This can be used to match against, say, the first element of
+ a list:
+
+ lambda x: x[0]
+
+ Defaults to fuzzywuzzy.utils.full_process().
+ scorer: Optional function for scoring matches between the query and
+ an individual processed choice. This should be a function
+ of the form f(query, choice) -> int.
+ By default, fuzz.WRatio() is used and expects both query and
+ choice to be strings.
+ limit: Optional maximum for the number of elements returned. Defaults
+ to 5.
+
+ Returns:
+ List of tuples containing the match and its score.
+
+ If a list is used for choices, then the result will be 2-tuples.
+ If a dictionary is used, then the result will be 3-tuples containing
+ the key for each match.
+
+ For example, searching for 'bird' in the dictionary
+
+ {'bard': 'train', 'dog': 'man'}
+
+ may return
+
+ [('train', 22, 'bard'), ('man', 0, 'dog')]
+ """
+ sl = extractWithoutOrder(query, choices, processor, scorer)
+ return heapq.nlargest(limit, sl, key=lambda i: i[1]) if limit is not None else \
+ sorted(sl, key=lambda i: i[1], reverse=True)
+
+
+def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5):
+ """Get a list of the best matches to a collection of choices.
+
+ Convenience function for getting the choices with best scores.
+
+ Args:
+ query: A string to match against
+ choices: A list or dictionary of choices, suitable for use with
+ extract().
+ processor: Optional function for transforming choices before matching.
+ See extract().
+ scorer: Scoring function for extract().
+ score_cutoff: Optional argument for score threshold. No matches with
+ a score less than this number will be returned. Defaults to 0.
+ limit: Optional maximum for the number of elements returned. Defaults
+ to 5.
+
+ Returns: A a list of (match, score) tuples.
+ """
+
+ best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
+ return heapq.nlargest(limit, best_list, key=lambda i: i[1]) if limit is not None else \
+ sorted(best_list, key=lambda i: i[1], reverse=True)
+
+
+def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
+ """Find the single best match above a score in a list of choices.
+
+ This is a convenience method which returns the single best choice.
+ See extract() for the full arguments list.
+
+ Args:
+ query: A string to match against
+ choices: A list or dictionary of choices, suitable for use with
+ extract().
+ processor: Optional function for transforming choices before matching.
+ See extract().
+ scorer: Scoring function for extract().
+ score_cutoff: Optional argument for score threshold. If the best
+ match is found, but it is not greater than this number, then
+ return None anyway ("not a good enough match"). Defaults to 0.
+
+ Returns:
+ A tuple containing a single match and its score, if a match
+ was found that was above score_cutoff. Otherwise, returns None.
+ """
+ best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
+ try:
+ return max(best_list, key=lambda i: i[1])
+ except ValueError:
+ return None
+
+
+def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
+ """This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
+ and remove duplicates. Specifically, it uses the process.extract to identify duplicates that
+ score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
+ since we assume this item contains the most entity information and returns that. It breaks string
+ length ties on an alphabetical sort.
+
+ Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
+ returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
+ sensitive.
+
+ Args:
+ contains_dupes: A list of strings that we would like to dedupe.
+ threshold: the numerical value (0,100) point at which we expect to find duplicates.
+ Defaults to 70 out of 100
+ scorer: Optional function for scoring matches between the query and
+ an individual processed choice. This should be a function
+ of the form f(query, choice) -> int.
+ By default, fuzz.token_set_ratio() is used and expects both query and
+ choice to be strings.
+
+ Returns:
+ A deduplicated list. For example:
+
+ In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
+ In: fuzzy_dedupe(contains_dupes)
+ Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
+ """
+
+ extractor = []
+
+ # iterate over items in *contains_dupes*
+ for item in contains_dupes:
+ # return all duplicate matches found
+ matches = extract(item, contains_dupes, limit=None, scorer=scorer)
+ # filter matches based on the threshold
+ filtered = [x for x in matches if x[1] > threshold]
+ # if there is only 1 item in *filtered*, no duplicates were found so append to *extracted*
+ if len(filtered) == 1:
+ extractor.append(filtered[0][0])
+
+ else:
+ # alpha sort
+ filtered = sorted(filtered, key=lambda x: x[0])
+ # length sort
+ filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
+ # take first item as our 'canonical example'
+ extractor.append(filter_sort[0][0])
+
+ # uniquify *extractor* list
+ keys = {}
+ for e in extractor:
+ keys[e] = 1
+ extractor = keys.keys()
+
+ # check that extractor differs from contain_dupes (e.g. duplicates were found)
+ # if not, then return the original list
+ if len(extractor) == len(contains_dupes):
+ return contains_dupes
+ else:
+ return extractor
diff --git a/lib/fuzzywuzzy/string_processing.py b/lib/fuzzywuzzy/string_processing.py
new file mode 100644
index 00000000..1bfc660b
--- /dev/null
+++ b/lib/fuzzywuzzy/string_processing.py
@@ -0,0 +1,30 @@
+from __future__ import unicode_literals
+import re
+import string
+import sys
+
+PY3 = sys.version_info[0] == 3
+if PY3:
+ string = str
+
+
+class StringProcessor(object):
+ """
+ This class defines method to process strings in the most
+ efficient way. Ideally all the methods below use unicode strings
+ for both input and output.
+ """
+
+ regex = re.compile(r"(?ui)\W")
+
+ @classmethod
+ def replace_non_letters_non_numbers_with_whitespace(cls, a_string):
+ """
+ This function replaces any sequence of non letters and non
+ numbers with a single white space.
+ """
+ return cls.regex.sub(" ", a_string)
+
+ strip = staticmethod(string.strip)
+ to_lower_case = staticmethod(string.lower)
+ to_upper_case = staticmethod(string.upper)
diff --git a/lib/fuzzywuzzy/utils.py b/lib/fuzzywuzzy/utils.py
new file mode 100644
index 00000000..db9d2875
--- /dev/null
+++ b/lib/fuzzywuzzy/utils.py
@@ -0,0 +1,99 @@
+from __future__ import unicode_literals
+import sys
+import functools
+
+from fuzzywuzzy.string_processing import StringProcessor
+
+
+PY3 = sys.version_info[0] == 3
+
+
+def validate_string(s):
+ """
+ Check input has length and that length > 0
+
+ :param s:
+ :return: True if len(s) > 0 else False
+ """
+ try:
+ return len(s) > 0
+ except TypeError:
+ return False
+
+
+def check_for_none(func):
+ @functools.wraps(func)
+ def decorator(*args, **kwargs):
+ if args[0] is None or args[1] is None:
+ return 0
+ return func(*args, **kwargs)
+ return decorator
+
+
+def check_empty_string(func):
+ @functools.wraps(func)
+ def decorator(*args, **kwargs):
+ if len(args[0]) == 0 or len(args[1]) == 0:
+ return 0
+ return func(*args, **kwargs)
+ return decorator
+
+
+bad_chars = str("").join([chr(i) for i in range(128, 256)]) # ascii dammit!
+if PY3:
+ translation_table = dict((ord(c), None) for c in bad_chars)
+ unicode = str
+
+
+def asciionly(s):
+ if PY3:
+ return s.translate(translation_table)
+ else:
+ return s.translate(None, bad_chars)
+
+
+def asciidammit(s):
+ if type(s) is str:
+ return asciionly(s)
+ elif type(s) is unicode:
+ return asciionly(s.encode('ascii', 'ignore'))
+ else:
+ return asciidammit(unicode(s))
+
+
+def make_type_consistent(s1, s2):
+ """If both objects aren't either both string or unicode instances force them to unicode"""
+ if isinstance(s1, str) and isinstance(s2, str):
+ return s1, s2
+
+ elif isinstance(s1, unicode) and isinstance(s2, unicode):
+ return s1, s2
+
+ else:
+ return unicode(s1), unicode(s2)
+
+
+def full_process(s, force_ascii=False):
+ """Process string by
+ -- removing all but letters and numbers
+ -- trim whitespace
+ -- force to lower case
+ if force_ascii == True, force convert to ascii"""
+
+ if s is None:
+ return ""
+
+ if force_ascii:
+ s = asciidammit(s)
+ # Keep only Letters and Numbers (see Unicode docs).
+ string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)
+ # Force into lowercase.
+ string_out = StringProcessor.to_lower_case(string_out)
+ # Remove leading and trailing whitespaces.
+ string_out = StringProcessor.strip(string_out)
+ return string_out
+
+
+def intr(n):
+ '''Returns a correctly rounded integer'''
+ return int(round(n))
diff --git a/lib/libtrakt/__init__.py b/lib/libtrakt/__init__.py
index 93f9ffcc..4797bd0c 100644
--- a/lib/libtrakt/__init__.py
+++ b/lib/libtrakt/__init__.py
@@ -1 +1,2 @@
from trakt import TraktAPI
+from indexerapiinterface import TraktIndexer
diff --git a/lib/libtrakt/exceptions.py b/lib/libtrakt/exceptions.py
index 2af697bc..67126eed 100644
--- a/lib/libtrakt/exceptions.py
+++ b/lib/libtrakt/exceptions.py
@@ -1,10 +1,14 @@
-class TraktException(Exception):
- pass
-
-
-class TraktAuthException(TraktException):
- pass
-
-
-class TraktServerBusy(TraktException):
- pass
+class TraktException(Exception):
+ pass
+
+
+class TraktAuthException(TraktException):
+ pass
+
+
+class TraktServerBusy(TraktException):
+ pass
+
+
+class TraktShowNotFound(TraktException):
+ pass
\ No newline at end of file
diff --git a/lib/libtrakt/indexerapiinterface.py b/lib/libtrakt/indexerapiinterface.py
new file mode 100644
index 00000000..4c98cb90
--- /dev/null
+++ b/lib/libtrakt/indexerapiinterface.py
@@ -0,0 +1,177 @@
+import logging
+import re
+import time
+from .exceptions import TraktShowNotFound, TraktException
+from sickbeard.exceptions import ex
+from trakt import TraktAPI
+
+
+class ShowContainer(dict):
+ """Simple dict that holds a series of Show instances
+ """
+
+ def __init__(self, **kwargs):
+ super(ShowContainer, self).__init__(**kwargs)
+
+ self._stack = []
+ self._lastgc = time.time()
+
+ def __setitem__(self, key, value):
+
+ self._stack.append(key)
+
+ # keep only the latest 100 results
+ if time.time() - self._lastgc > 20:
+ for o in self._stack[:-100]:
+ del self[o]
+
+ self._stack = self._stack[-100:]
+
+ self._lastgc = time.time()
+
+ super(ShowContainer, self).__setitem__(key, value)
+
+
+def log():
+ return logging.getLogger('trakt_api')
+
+
+class TraktSearchTypes:
+ text = 1
+ trakt_id = 'trakt'
+ tvdb_id = 'tvdb'
+ imdb_id = 'imdb'
+ tmdb_id = 'tmdb'
+ tvrage_id = 'tvrage'
+ all = [text, trakt_id, tvdb_id, imdb_id, tmdb_id, tvrage_id]
+
+ def __init__(self):
+ pass
+
+
+class TraktResultTypes:
+ show = 'show'
+ episode = 'episode'
+ movie = 'movie'
+ person = 'person'
+ list = 'list'
+ all = [show, episode, movie, person, list]
+
+ def __init__(self):
+ pass
+
+
+class TraktIndexer:
+ # noinspection PyUnusedLocal
+ # noinspection PyDefaultArgument
+ def __init__(self, custom_ui=None, sleep_retry=None, search_type=TraktSearchTypes.text,
+ result_types=[TraktResultTypes.show], *args, **kwargs):
+
+ self.config = {
+ 'apikey': '',
+ 'debug_enabled': False,
+ 'custom_ui': custom_ui,
+ 'proxy': None,
+ 'cache_enabled': False,
+ 'cache_location': '',
+ 'valid_languages': [],
+ 'langabbv_to_id': {},
+ 'language': 'en',
+ 'base_url': '',
+ 'search_type': search_type if search_type in TraktSearchTypes.all else TraktSearchTypes.text,
+ 'sleep_retry': sleep_retry,
+ 'result_types': result_types if isinstance(result_types, list) and all(x in TraktResultTypes.all for x in result_types) else [TraktResultTypes.show],
+ }
+
+ self.corrections = {}
+ self.shows = ShowContainer()
+
+ def _get_series(self, series):
+ """This searches Trakt for the series name,
+ If a custom_ui UI is configured, it uses this to select the correct
+ series.
+ """
+ all_series = self.search(series)
+ if not isinstance(all_series, list):
+ all_series = [all_series]
+
+ if 0 == len(all_series):
+ log().debug('Series result returned zero')
+ raise TraktShowNotFound('Show-name search returned zero results (cannot find show on TVDB)')
+
+ if None is not self.config['custom_ui']:
+ log().debug('Using custom UI %s' % (repr(self.config['custom_ui'])))
+ custom_ui = self.config['custom_ui']
+ ui = custom_ui(config=self.config)
+
+ return ui.select_series(all_series)
+
+ return all_series
+
+ def __getitem__(self, key):
+ """Handles trakt_instance['seriesname'] calls.
+ The dict index should be the show id
+ """
+ if isinstance(key, tuple) and 2 == len(key):
+ key = key[0]
+
+ self.config['searchterm'] = key
+ selected_series = self._get_series(key)
+ if isinstance(selected_series, dict):
+ selected_series = [selected_series]
+
+ return selected_series
+
+ def __repr__(self):
+ return str(self.shows)
+
+ def _clean_data(self, data):
+ """Cleans up strings, lists, dicts returned
+
+ Issues corrected:
+ - Replaces &amp; with &
+ - Trailing whitespace
+ """
+ if isinstance(data, list):
+ return [self._clean_data(d) for d in data]
+ if isinstance(data, dict):
+ return {k: self._clean_data(v) for k, v in data.iteritems()}
+ return data if not isinstance(data, (str, unicode)) else data.strip().replace(u'&amp;', u'&')
+
+ @staticmethod
+ def _dict_prevent_none(d, key, default):
+ v = None
+ if isinstance(d, dict):
+ v = d.get(key, default)
+ return (v, default)[None is v]
+
+ def search(self, series):
+ if TraktSearchTypes.text != self.config['search_type']:
+ url = '/search/%s/%s?type=%s&extended=full&limit=100' % (self.config['search_type'], series,
+ ','.join(self.config['result_types']))
+ else:
+ url = '/search/%s?query=%s&extended=full&limit=100' % (','.join(self.config['result_types']), series)
+ filtered = []
+ kwargs = {}
+ if None is not self.config['sleep_retry']:
+ kwargs['sleep_retry'] = self.config['sleep_retry']
+ try:
+ resp = TraktAPI().trakt_request(url, **kwargs)
+ if len(resp):
+ for d in resp:
+ if isinstance(d, dict) and 'type' in d and d['type'] in self.config['result_types']:
+ for k, v in d.iteritems():
+ d[k] = self._clean_data(v)
+ if 'show' in d and TraktResultTypes.show == d['type']:
+ d.update(d['show'])
+ del d['show']
+ d['seriesname'] = self._dict_prevent_none(d, 'title', '')
+ d['genres_list'] = d.get('genres', [])
+ d['genres'] = ', '.join(['%s' % v for v in d.get('genres', []) or [] if v])
+ d['firstaired'] = (d.get('first_aired') and
+ re.sub(r'T.*$', '', str(d.get('first_aired'))) or d.get('year'))
+ filtered.append(d)
+ except TraktException as e:
+ log().debug('Could not connect to Trakt service: %s' % ex(e))
+
+ return filtered
diff --git a/lib/libtrakt/trakt.py b/lib/libtrakt/trakt.py
index 4096067d..d09a9da0 100644
--- a/lib/libtrakt/trakt.py
+++ b/lib/libtrakt/trakt.py
@@ -6,7 +6,7 @@ import time
import datetime
from sickbeard import logger
-from exceptions import TraktException, TraktAuthException # , TraktServerBusy
+from .exceptions import TraktException, TraktAuthException # , TraktServerBusy
class TraktAccount:
diff --git a/lib/tvdb_api/tvdb_api.py b/lib/tvdb_api/tvdb_api.py
index a959f2ed..d00c4b70 100644
--- a/lib/tvdb_api/tvdb_api.py
+++ b/lib/tvdb_api/tvdb_api.py
@@ -174,9 +174,9 @@ class Show(dict):
Search terms are converted to lower case (unicode) strings.
# Examples
-
+
These examples assume t is an instance of Tvdb():
-
+
>> t = Tvdb()
>>
@@ -347,6 +347,7 @@ class Tvdb:
u'My Last Day'
"""
+ # noinspection PyUnusedLocal
def __init__(self,
interactive=False,
select_first=False,
@@ -363,7 +364,9 @@ class Tvdb:
search_all_languages=False,
apikey=None,
dvdorder=False,
- proxy=None):
+ proxy=None,
+ *args,
+ **kwargs):
"""interactive (True/False):
When True, uses built-in console UI is used to select the correct show.
@@ -665,15 +668,18 @@ class Tvdb:
else:
self.shows[sid].data[key] = value
- @staticmethod
- def _clean_data(data):
- """Cleans up strings returned by TheTVDB.com
+ def _clean_data(self, data):
+ """Cleans up strings, lists, dicts returned
Issues corrected:
- Replaces &amp; with &
- Trailing whitespace
"""
- return data if not isinstance(data, basestring) else data.strip().replace(u'&amp;', u'&')
+ if isinstance(data, list):
+ return [self._clean_data(d) for d in data]
+ if isinstance(data, dict):
+ return {k: self._clean_data(v) for k, v in data.iteritems()}
+ return data if not isinstance(data, (str, unicode)) else data.strip().replace(u'&amp;', u'&')
def search(self, series):
"""This searches TheTVDB.com for the series name
@@ -719,7 +725,7 @@ class Tvdb:
log().debug('Interactively selecting show using ConsoleUI')
ui = ConsoleUI(config=self.config)
- return ui.selectSeries(all_series)
+ return ui.select_series(all_series)
def _parse_banners(self, sid, img_list):
banners = {}
diff --git a/lib/tvdb_api/tvdb_ui.py b/lib/tvdb_api/tvdb_ui.py
index 7725802c..b3ffc787 100644
--- a/lib/tvdb_api/tvdb_ui.py
+++ b/lib/tvdb_api/tvdb_ui.py
@@ -13,14 +13,14 @@ A UI is a callback. A class, it's __init__ function takes two arguments:
- log, which is Tvdb's logger instance (which uses the logging module). You can
call log.info() log.warning() etc
-It must have a method "selectSeries", this is passed a list of dicts, each dict
+It must have a method "select_series", this is passed a list of dicts, each dict
contains the the keys "name" (human readable show name), and "sid" (the shows
ID as on thetvdb.com). For example:
[{'name': u'Lost', 'sid': u'73739'},
{'name': u'Lost Universe', 'sid': u'73181'}]
-The "selectSeries" method must return the appropriate dict, or it can raise
+The "select_series" method must return the appropriate dict, or it can raise
tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show
cannot be found).
@@ -29,7 +29,7 @@ A simple example callback, which returns a random series:
>>> import random
>>> from tvdb_ui import BaseUI
>>> class RandomUI(BaseUI):
-... def selectSeries(self, allSeries):
+... def select_series(self, allSeries):
... import random
... return random.choice(allSeries)
@@ -50,9 +50,11 @@ import warnings
from tvdb_exceptions import tvdb_userabort
+
def log():
return logging.getLogger(__name__)
+
class BaseUI:
"""Default non-interactive UI, which auto-selects first results
"""
@@ -64,8 +66,8 @@ class BaseUI:
"The self.log attribute will be removed in the next version")
self.log = logging.getLogger(__name__)
- def selectSeries(self, allSeries):
- return allSeries[0]
+ def select_series(self, all_series):
+ return all_series[0]
class ConsoleUI(BaseUI):
@@ -98,17 +100,17 @@ class ConsoleUI(BaseUI):
extra
)
- def selectSeries(self, allSeries):
- self._displaySeries(allSeries)
+ def select_series(self, all_series):
+ self._displaySeries(all_series)
- if len(allSeries) == 1:
+ if len(all_series) == 1:
# Single result, return it!
print "Automatically selecting only result"
- return allSeries[0]
+ return all_series[0]
if self.config['select_first'] is True:
print "Automatically returning first search result"
- return allSeries[0]
+ return all_series[0]
while True: # return breaks this loop
try:
@@ -126,7 +128,7 @@ class ConsoleUI(BaseUI):
if len(ans.strip()) == 0:
# Default option
log().debug('Default option, returning first series')
- return allSeries[0]
+ return all_series[0]
if ans == "q":
log().debug('Got quit command (q)')
raise tvdb_userabort("User aborted ('q' quit command)")
@@ -139,15 +141,15 @@ class ConsoleUI(BaseUI):
print "# q - abort tvnamer"
print "# Press return with no input to select first result"
elif ans.lower() in ["a", "all"]:
- self._displaySeries(allSeries, limit = None)
+ self._displaySeries(all_series, limit = None)
else:
log().debug('Unknown keypress %s' % (ans))
else:
log().debug('Trying to return ID: %d' % (selected_id))
try:
- return allSeries[selected_id]
+ return all_series[selected_id]
except IndexError:
log().debug('Invalid show number entered!')
print "Invalid number (%s) selected!"
- self._displaySeries(allSeries)
+ self._displaySeries(all_series)
diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py
index 4687b972..dc25093b 100755
--- a/sickbeard/__init__.py
+++ b/sickbeard/__init__.py
@@ -59,7 +59,7 @@ CFG = None
CONFIG_FILE = None
# This is the version of the config we EXPECT to find
-CONFIG_VERSION = 15
+CONFIG_VERSION = 16
# Default encryption version (0 for None)
ENCRYPTION_VERSION = 0
@@ -167,6 +167,8 @@ METADATA_TIVO = None
METADATA_MEDE8ER = None
METADATA_KODI = None
+RESULTS_SORTBY = None
+
QUALITY_DEFAULT = None
STATUS_DEFAULT = None
WANTED_BEGIN_DEFAULT = None
@@ -536,6 +538,8 @@ def initialize(console_logging=True):
versionCheckScheduler, showQueueScheduler, searchQueueScheduler, \
properFinderScheduler, autoPostProcesserScheduler, subtitlesFinderScheduler, background_mapping_task, \
provider_ping_thread_pool
+ # Add Show Search
+ global RESULTS_SORTBY
# Add Show Defaults
global STATUS_DEFAULT, QUALITY_DEFAULT, SHOW_TAG_DEFAULT, FLATTEN_FOLDERS_DEFAULT, SUBTITLES_DEFAULT, \
WANTED_BEGIN_DEFAULT, WANTED_LATEST_DEFAULT, SCENE_DEFAULT, ANIME_DEFAULT
@@ -754,6 +758,8 @@ def initialize(console_logging=True):
if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', ROOT_DIRS):
ROOT_DIRS = ''
+ RESULTS_SORTBY = check_setting_str(CFG, 'General', 'results_sortby', '')
+
QUALITY_DEFAULT = check_setting_int(CFG, 'General', 'quality_default', SD)
STATUS_DEFAULT = check_setting_int(CFG, 'General', 'status_default', SKIPPED)
WANTED_BEGIN_DEFAULT = check_setting_int(CFG, 'General', 'wanted_begin_default', 0)
@@ -1512,6 +1518,7 @@ def save_config():
new_config['General']['recentsearch_startup'] = int(RECENTSEARCH_STARTUP)
new_config['General']['backlog_nofull'] = int(BACKLOG_NOFULL)
new_config['General']['skip_removed_files'] = int(SKIP_REMOVED_FILES)
+ new_config['General']['results_sortby'] = str(RESULTS_SORTBY)
new_config['General']['quality_default'] = int(QUALITY_DEFAULT)
new_config['General']['status_default'] = int(STATUS_DEFAULT)
new_config['General']['wanted_begin_default'] = int(WANTED_BEGIN_DEFAULT)
diff --git a/sickbeard/classes.py b/sickbeard/classes.py
index a6642cbb..8baa0386 100644
--- a/sickbeard/classes.py
+++ b/sickbeard/classes.py
@@ -19,7 +19,6 @@ import re
import datetime
import sickbeard
-from lib.dateutil import parser
from sickbeard.common import Quality
from unidecode import unidecode
@@ -72,20 +71,15 @@ class SearchResult:
if self.provider is None:
return 'Invalid provider, unable to print self'
- myString = '%s @ %s\n' % (self.provider.name, self.url)
- myString += 'Extra Info:\n'
- for extra in self.extraInfo:
- myString += ' %s\n' % extra
- myString += 'Episode: %s\n' % self.episodes
- myString += 'Quality: %s\n' % Quality.qualityStrings[self.quality]
- myString += 'Name: %s\n' % self.name
- myString += 'Size: %s\n' % str(self.size)
- myString += 'Release Group: %s\n' % self.release_group
-
- return myString
-
- def fileName(self):
- return self.episodes[0].prettyName() + '.' + self.resultType
+ return '\n'.join([
+ '%s @ %s' % (self.provider.name, self.url),
+ 'Extra Info:',
+ '\n'.join([' %s' % x for x in self.extraInfo]),
+ 'Episode: %s' % self.episodes,
+ 'Quality: %s' % Quality.qualityStrings[self.quality],
+ 'Name: %s' % self.name,
+ 'Size: %s' % self.size,
+ 'Release Group: %s' % self.release_group])
def get_data(self):
if None is not self.get_data_func:
@@ -97,6 +91,7 @@ class SearchResult:
return self.extraInfo[0]
return None
+
class NZBSearchResult(SearchResult):
"""
Regular NZB result with an URL to the NZB
@@ -122,7 +117,66 @@ class TorrentSearchResult(SearchResult):
hash = None
-class AllShowsListUI:
+class ShowFilter(object):
+ def __init__(self, config, log=None):
+ self.config = config
+ self.log = log
+ self.bad_names = [re.compile('(?i)%s' % r) for r in (
+ '[*]+\s*(?:403:|do not add|dupli[^s]+\s*(?:\d+| age_limit]
+ self[:] = [x for x in self if self._is_cache_item(x) and age_limit < x[1]]
def __repr__(self):
- return str([x[0] for x in self if isinstance(x, (tuple, list)) and len(x) == 2])
+ return str([x[0] for x in self if self._is_cache_item(x)])
- def __contains__(self, y):
+ def __contains__(self, url):
for x in self:
- if isinstance(x, (tuple, list)) and len(x) == 2 and y == x[0]:
+ if self._is_cache_item(x) and url == x[0]:
return True
return False
- def remove(self, x):
- for v in self:
- if isinstance(v, (tuple, list)) and len(v) == 2 and v[0] == x:
- super(ImageUrlList, self).remove(v)
+ def remove(self, url):
+ for x in self:
+ if self._is_cache_item(x) and url == x[0]:
+ super(ImageUrlList, self).remove(x)
break
diff --git a/sickbeard/config.py b/sickbeard/config.py
index fee47514..1b66c0f6 100644
--- a/sickbeard/config.py
+++ b/sickbeard/config.py
@@ -449,7 +449,8 @@ class ConfigMigrator():
12: 'Add "hevc" and some non-english languages to ignore words if not found',
13: 'Change default dereferrer url to blank',
14: 'Convert Trakt to multi-account',
- 15: 'Transmithe.net rebranded Nebulance'}
+ 15: 'Transmithe.net rebranded Nebulance',
+ 16: 'Purge old cache image folders'}
def migrate_config(self):
""" Calls each successive migration until the config is the same version as SG expects """
@@ -807,3 +808,18 @@ class ConfigMigrator():
neb.search_fallback = bool(check_setting_int(self.config_obj, old_id_uc, old_id + '_search_fallback', 0))
neb.seed_time = check_setting_int(self.config_obj, old_id_uc, old_id + '_seed_time', '')
neb._seed_ratio = check_setting_str(self.config_obj, old_id_uc, old_id + '_seed_ratio', '')
+
+ # Migration v16: Purge old cache image folder name
+ @staticmethod
+ def _migrate_v16():
+ if sickbeard.CACHE_DIR and ek.ek(os.path.isdir, sickbeard.CACHE_DIR):
+ cache_default = sickbeard.CACHE_DIR
+ dead_paths = ['anidb', 'imdb', 'trakt']
+ for path in dead_paths:
+ sickbeard.CACHE_DIR = '%s/images/%s' % (cache_default, path)
+ helpers.clearCache(True)
+ try:
+ ek.ek(os.rmdir, sickbeard.CACHE_DIR)
+ except OSError:
+ pass
+ sickbeard.CACHE_DIR = cache_default
diff --git a/sickbeard/helpers.py b/sickbeard/helpers.py
index 58f6bbe9..182c8566 100644
--- a/sickbeard/helpers.py
+++ b/sickbeard/helpers.py
@@ -1108,13 +1108,6 @@ def getURL(url, post_data=None, params=None, headers=None, timeout=30, session=N
2) Return True/False if success after using kwargs 'savefile' set to file pathname.
"""
- # download and save file or simply fetch url
- savename = None
- if 'savename' in kwargs:
- # session streaming
- session.stream = True
- savename = kwargs.pop('savename')
-
# selectively mute some errors
mute = []
for muted in filter(
@@ -1126,6 +1119,13 @@ def getURL(url, post_data=None, params=None, headers=None, timeout=30, session=N
if None is session:
session = CloudflareScraper.create_scraper()
+ # download and save file or simply fetch url
+ savename = None
+ if 'savename' in kwargs:
+ # session streaming
+ session.stream = True
+ savename = kwargs.pop('savename')
+
if 'nocache' in kwargs:
del kwargs['nocache']
else:
@@ -1478,8 +1478,8 @@ def cleanup_cache():
"""
Delete old cached files
"""
- delete_not_changed_in([ek.ek(os.path.join, sickbeard.CACHE_DIR, *x) for x in [
- ('images', 'trakt'), ('images', 'imdb'), ('images', 'anidb')]])
+ delete_not_changed_in([ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images', 'browse', 'thumb', x) for x in [
+ 'anidb', 'imdb', 'trakt', 'tvdb']])
def delete_not_changed_in(paths, days=30, minutes=0):
diff --git a/sickbeard/indexers/indexer_config.py b/sickbeard/indexers/indexer_config.py
index 26a3b373..2f508499 100644
--- a/sickbeard/indexers/indexer_config.py
+++ b/sickbeard/indexers/indexer_config.py
@@ -1,4 +1,5 @@
from lib.tvdb_api.tvdb_api import Tvdb
+from lib.libtrakt.indexerapiinterface import TraktIndexer
INDEXER_TVDB = 1
INDEXER_TVRAGE = 2
@@ -65,9 +66,9 @@ indexerConfig = {
main_url='https://www.trakt.tv/',
id=INDEXER_TRAKT,
name='Trakt',
- module=None,
+ module=TraktIndexer,
api_params={},
- active=False,
+ active=True,
dupekey='trakt',
mapped_only=True,
icon='trakt16.png',
diff --git a/sickbeard/webserve.py b/sickbeard/webserve.py
index eb49aff4..ff6e749c 100644
--- a/sickbeard/webserve.py
+++ b/sickbeard/webserve.py
@@ -48,7 +48,7 @@ from sickbeard.common import SNATCHED, UNAIRED, IGNORED, ARCHIVED, WANTED, FAILE
from sickbeard.common import SD, HD720p, HD1080p, UHD2160p
from sickbeard.exceptions import ex
from sickbeard.helpers import has_image_ext, remove_article, starify
-from sickbeard.indexers.indexer_config import INDEXER_TVDB, INDEXER_TVRAGE
+from sickbeard.indexers.indexer_config import INDEXER_TVDB, INDEXER_TVRAGE, INDEXER_TRAKT
from sickbeard.scene_numbering import get_scene_numbering, set_scene_numbering, get_scene_numbering_for_show, \
get_xem_numbering_for_show, get_scene_absolute_numbering_for_show, get_xem_absolute_numbering_for_show, \
get_scene_absolute_numbering
@@ -67,10 +67,12 @@ from unidecode import unidecode
from lib.libtrakt import TraktAPI
from lib.libtrakt.exceptions import TraktException, TraktAuthException
+from lib.libtrakt.indexerapiinterface import TraktSearchTypes
from trakt_helpers import build_config, trakt_collection_remove_account
from sickbeard.bs4_parser import BS4Parser
from lib.tmdb_api import TMDB
from lib.tvdb_api.tvdb_exceptions import tvdb_exception
+from lib.fuzzywuzzy import fuzz
try:
import json
@@ -2558,8 +2560,9 @@ class NewHomeAddShows(Home):
def sanitizeFileName(self, name):
return helpers.sanitizeFileName(name)
+ # noinspection PyPep8Naming
def searchIndexersForShowName(self, search_term, lang='en', indexer=None):
- if not lang or lang == 'null':
+ if not lang or 'null' == lang:
lang = 'en'
term = search_term.decode('utf-8').strip()
terms = []
@@ -2573,22 +2576,31 @@ class NewHomeAddShows(Home):
results = {}
final_results = []
- search_id, indexer_id = '', None
- search_id = ''
+ search_id, indexer_id, trakt_id, tmdb_id, INDEXER_TVDB_X = '', None, None, None, INDEXER_TRAKT
try:
search_id = re.search(r'(?m)((?:tt\d{4,})|^\d{4,}$)', search_term).group(1)
- resp = [r for r in self.getTrakt('/search/%s/%s?type=show&extended=full' % (
- ('tvdb', 'imdb')['tt' in search_id], search_id)) if 'show' == r['type']][0]
- search_term = resp['show']['title']
- indexer_id = resp['show']['ids']['tvdb']
- except:
+
+ lINDEXER_API_PARMS = sickbeard.indexerApi(INDEXER_TVDB_X).api_params.copy()
+ lINDEXER_API_PARMS['language'] = lang
+ lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsNoFilterListUI
+ lINDEXER_API_PARMS['sleep_retry'] = 5
+ lINDEXER_API_PARMS['search_type'] = (TraktSearchTypes.tvdb_id, TraktSearchTypes.imdb_id)['tt' in search_id]
+ t = sickbeard.indexerApi(INDEXER_TVDB_X).indexer(**lINDEXER_API_PARMS)
+
+ resp = t[search_id][0]
+ search_term = resp['seriesname']
+ indexer_id = resp['ids']['tvdb']
+ trakt_id = resp['ids'].get('trakt')
+ tmdb_id = resp['ids'].get('tmdb')
+
+ except (StandardError, Exception):
search_term = (search_term, '')['tt' in search_id]
- # Query Indexers for each search term and build the list of results
+ # query Indexers for search term and build list of results
for indexer in sickbeard.indexerApi().indexers if not int(indexer) else [int(indexer)]:
lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
lINDEXER_API_PARMS['language'] = lang
- lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsListUI
+ lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsNoFilterListUI
t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
try:
@@ -2596,96 +2608,152 @@ class NewHomeAddShows(Home):
if bool(indexer_id):
logger.log('Fetching show using id: %s (%s) from tv datasource %s' % (
search_id, search_term, sickbeard.indexerApi(indexer).name), logger.DEBUG)
- results.setdefault('tt' in search_id and 3 or indexer, []).extend(
- [{'id': indexer_id, 'seriesname': t[indexer_id, False]['seriesname'],
- 'firstaired': t[indexer_id, False]['firstaired'], 'network': t[indexer_id, False]['network'],
- 'overview': t[indexer_id, False]['overview'],
- 'genres': '' if not t[indexer_id, False]['genre'] else
- t[indexer_id, False]['genre'].lower().strip('|').replace('|', ', '),
- }])
+ r = t[indexer_id, False]
+ results.setdefault((indexer, INDEXER_TVDB_X)['tt' in search_id], {})[int(indexer_id)] = {
+ 'id': indexer_id, 'seriesname': r['seriesname'], 'firstaired': r['firstaired'],
+ 'network': r['network'], 'overview': r['overview'],
+ 'genres': '' if not r['genre'] else r['genre'].lower().strip('|').replace('|', ', '),
+ 'trakt_id': trakt_id, 'tmdb_id': tmdb_id
+ }
break
else:
logger.log('Searching for shows using search term: %s from tv datasource %s' % (
search_term, sickbeard.indexerApi(indexer).name), logger.DEBUG)
- tvdb_ids = []
+ results.setdefault(indexer, {})
for term in terms:
try:
for r in t[term]:
tvdb_id = int(r['id'])
- if tvdb_id not in tvdb_ids:
- tvdb_ids.append(tvdb_id)
- results.setdefault(indexer, []).extend([r.copy()])
+ if tvdb_id not in results[indexer]:
+ results.setdefault(indexer, {})[tvdb_id] = r.copy()
+ elif r['seriesname'] != results[indexer][tvdb_id]['seriesname']:
+ results[indexer][tvdb_id].setdefault('aliases', []).append(r['seriesname'])
except tvdb_exception:
pass
- except Exception as e:
+ except (StandardError, Exception):
pass
- # Query trakt for tvdb ids
- try:
- logger.log('Searching for show using search term: %s from tv datasource Trakt' % search_term, logger.DEBUG)
- resp = []
- for term in terms:
- result = self.getTrakt('/search/show?query=%s&extended=full' % term)
- resp += result
- match = False
- for r in result:
- if term == r.get('show', {}).get('title', ''):
- match = True
- if match:
+ # query trakt for tvdb ids
+ try:
+ logger.log('Searching for show using search term: %s from tv datasource Trakt' % search_term, logger.DEBUG)
+ resp = []
+ lINDEXER_API_PARMS = sickbeard.indexerApi(INDEXER_TVDB_X).api_params.copy()
+ lINDEXER_API_PARMS['language'] = lang
+ lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsNoFilterListUI
+ lINDEXER_API_PARMS['sleep_retry'] = 5
+ lINDEXER_API_PARMS['search_type'] = TraktSearchTypes.text
+ t = sickbeard.indexerApi(INDEXER_TVDB_X).indexer(**lINDEXER_API_PARMS)
+
+ for term in terms:
+ result = t[term]
+ resp += result
+ match = False
+ for r in result:
+ if isinstance(r.get('seriesname'), (str, unicode)) \
+ and term.lower() == r.get('seriesname', '').lower():
+ match = True
break
- tvdb_ids = []
- results_trakt = []
- for item in resp:
- show = item['show']
- if 'tvdb' in show['ids'] and show['ids']['tvdb'] and show['ids']['tvdb'] not in tvdb_ids:
- results_trakt.append({
- 'id': show['ids']['tvdb'], 'seriesname': show['title'],
- 'firstaired': (show['first_aired'] and re.sub(r'T.*$', '', str(show['first_aired'])) or show['year']),
- 'network': show['network'], 'overview': show['overview'],
- 'genres': ', '.join(['%s' % v.lower() for v in show.get('genres', {}) or []])})
- tvdb_ids.append(show['ids']['tvdb'])
- results.update({3: results_trakt})
- if INDEXER_TVDB in results:
- tvdb_filtered = []
- for tvdb_item in results[INDEXER_TVDB]:
- if int(tvdb_item['id']) not in tvdb_ids:
- tvdb_filtered.append(tvdb_item)
- if tvdb_filtered:
- results[INDEXER_TVDB] = tvdb_filtered
- else:
- del(results[INDEXER_TVDB])
- except:
- pass
+ if match:
+ break
+ results_trakt = {}
+ for item in resp:
+ if 'tvdb' in item['ids'] and item['ids']['tvdb']:
+ if item['ids']['tvdb'] not in results[INDEXER_TVDB]:
+ results_trakt[int(item['ids']['tvdb'])] = {
+ 'id': item['ids']['tvdb'], 'seriesname': item['seriesname'],
+ 'genres': item['genres'].lower(), 'network': item['network'],
+ 'overview': item['overview'], 'firstaired': item['firstaired'],
+ 'trakt_id': item['ids']['trakt'], 'tmdb_id': item['ids']['tmdb']}
+ elif item['seriesname'] != results[INDEXER_TVDB][int(item['ids']['tvdb'])]['seriesname']:
+ results[INDEXER_TVDB][int(item['ids']['tvdb'])].setdefault(
+ 'aliases', []).append(item['seriesname'])
+ results.setdefault(INDEXER_TVDB_X, {}).update(results_trakt)
+ except (StandardError, Exception):
+ pass
- id_names = [None, sickbeard.indexerApi(INDEXER_TVDB).name, sickbeard.indexerApi(INDEXER_TVRAGE).name,
- '%s via Trakt' % sickbeard.indexerApi(INDEXER_TVDB).name]
+ id_names = {iid: (name, '%s via %s' % (sickbeard.indexerApi(INDEXER_TVDB).name, name))[INDEXER_TVDB_X == iid]
+ for iid, name in sickbeard.indexerApi().all_indexers.iteritems()}
+ # noinspection PyUnboundLocalVariable
map(final_results.extend,
- ([['%s%s' % (id_names[id], helpers.findCertainShow(sickbeard.showList, int(show['id'])) and ' - exists in db' or ''),
- (id, INDEXER_TVDB)[id == 3], sickbeard.indexerApi((id, INDEXER_TVDB)[id == 3]).config['show_url'], int(show['id']),
+ ([[id_names[iid], any([helpers.find_show_by_id(
+ sickbeard.showList, {(iid, INDEXER_TVDB)[INDEXER_TVDB_X == iid]: int(show['id'])},
+ no_mapped_ids=False)]),
+ iid, (iid, INDEXER_TVDB)[INDEXER_TVDB_X == iid],
+ sickbeard.indexerApi((iid, INDEXER_TVDB)[INDEXER_TVDB_X == iid]).config['show_url'], int(show['id']),
show['seriesname'], self.encode_html(show['seriesname']), show['firstaired'],
show.get('network', '') or '', show.get('genres', '') or '',
- re.sub(r'([,\.!][^,\.!]*?)$', '...',
- re.sub(r'([!\?\.])(?=\w)', r'\1 ',
- self.encode_html((show.get('overview', '') or '')[:250:].strip())))
- ] for show in shows] for id, shows in results.items()))
+ re.sub(r'([,.!][^,.!]*?)$', '...',
+ re.sub(r'([.!?])(?=\w)', r'\1 ',
+ self.encode_html((show.get('overview', '') or '')[:250:].strip()))),
+ self._get_UWRatio(term, show['seriesname'], show.get('aliases', [])), None, None,
+ self._make_search_image_url(iid, show)
+ ] for show in shows.itervalues()] for iid, shows in results.iteritems()))
- lang_id = sickbeard.indexerApi().config['langabbv_to_id'][lang]
- return json.dumps({
- 'results': sorted(final_results, reverse=True, key=lambda x: dateutil.parser.parse(
- re.match('^(?:19|20)\d\d$', str(x[6])) and ('%s-12-31' % str(x[6])) or (x[6] and str(x[6])) or '1900')),
- 'langid': lang_id})
+ def final_order(sortby_index, data, final_sort):
+ idx_is_indb = 1
+ for (n, x) in enumerate(data):
+ x[sortby_index] = n + (1000, 0)[x[idx_is_indb] and 'notop' not in sickbeard.RESULTS_SORTBY]
+ return data if not final_sort else sorted(data, reverse=False, key=lambda x: x[sortby_index])
- def getTrakt(self, url, *args, **kwargs):
+ def sort_date(data_result, is_last_sort):
+ idx_date_sort, idx_src, idx_aired = 13, 2, 8
+ return final_order(
+ idx_date_sort,
+ sorted(
+ sorted(data_result, reverse=True, key=lambda x: (dateutil.parser.parse(
+ re.match('^(?:19|20)\d\d$', str(x[idx_aired])) and ('%s-12-31' % str(x[idx_aired]))
+ or (x[idx_aired] and str(x[idx_aired])) or '1900'))),
+ reverse=False, key=lambda x: x[idx_src]), is_last_sort)
- filtered = []
- try:
- resp = TraktAPI().trakt_request(url, sleep_retry=5)
- if len(resp):
- filtered = resp
- except TraktException as e:
- logger.log(u'Could not connect to Trakt service: %s' % ex(e), logger.WARNING)
+ def sort_az(data_result, is_last_sort):
+ idx_az_sort, idx_src, idx_title = 14, 2, 6
+ return final_order(
+ idx_az_sort,
+ sorted(
+ data_result, reverse=False, key=lambda x: (
+ x[idx_src],
+ (remove_article(x[idx_title].lower()), x[idx_title].lower())[sickbeard.SORT_ARTICLE])),
+ is_last_sort)
- return filtered
+ def sort_rel(data_result, is_last_sort):
+ idx_rel_sort, idx_src, idx_rel = 12, 2, 12
+ return final_order(
+ idx_rel_sort,
+ sorted(
+ sorted(data_result, reverse=True, key=lambda x: x[idx_rel]),
+ reverse=False, key=lambda x: x[idx_src]), is_last_sort)
+
+ if 'az' == sickbeard.RESULTS_SORTBY[:2]:
+ sort_results = [sort_date, sort_rel, sort_az]
+ elif 'date' == sickbeard.RESULTS_SORTBY[:4]:
+ sort_results = [sort_az, sort_rel, sort_date]
+ else:
+ sort_results = [sort_az, sort_date, sort_rel]
+
+ for n, func in enumerate(sort_results):
+ final_results = func(final_results, n == len(sort_results) - 1)
+
+ return json.dumps({'results': final_results, 'langid': sickbeard.indexerApi().config['langabbv_to_id'][lang]})
+
+ @staticmethod
+ def _make_search_image_url(iid, show):
+ img_url = ''
+ if INDEXER_TRAKT == iid:
+ img_url = 'imagecache?path=browse/thumb/trakt&filename=%s&trans=0&tmdbid=%s&tvdbid=%s' % \
+ ('%s.jpg' % show['trakt_id'], show.get('tmdb_id'), show.get('id'))
+ elif INDEXER_TVDB == iid:
+ img_url = 'imagecache?path=browse/thumb/tvdb&filename=%s&trans=0&tvdbid=%s' % \
+ ('%s.jpg' % show['id'], show['id'])
+ return img_url
+
+ def _get_UWRatio(self, search_term, showname, aliases):
+ s = fuzz.UWRatio(search_term, showname)
+ # check aliases and give them a little lower score
+ for a in aliases:
+ ns = fuzz.UWRatio(search_term, a) - 1
+ if ns > s:
+ s = ns
+ return s
def massAddTable(self, rootDir=None, **kwargs):
t = PageTemplate(headers=self.request.headers, file='home_massAddTable.tmpl')
@@ -2900,7 +2968,7 @@ class NewHomeAddShows(Home):
newest = dt_string
img_uri = 'http://img7.anidb.net/pics/anime/%s' % image
- images = dict(poster=dict(thumb='imagecache?path=anidb&source=%s' % img_uri))
+ images = dict(poster=dict(thumb='imagecache?path=browse/thumb/anidb&source=%s' % img_uri))
sickbeard.CACHE_IMAGE_URL_LIST.add_url(img_uri)
votes = rating = 0
@@ -3050,7 +3118,7 @@ class NewHomeAddShows(Home):
dims = [row.get('poster', {}).get('width', 0), row.get('poster', {}).get('height', 0)]
s = [scale(x, int(max(dims))) for x in dims]
img_uri = re.sub('(?im)(.*V1_?)(\..*?)$', r'\1UX%s_CR0,0,%s,%s_AL_\2' % (s[0], s[0], s[1]), img_uri)
- images = dict(poster=dict(thumb='imagecache?path=imdb&source=%s' % img_uri))
+ images = dict(poster=dict(thumb='imagecache?path=browse/thumb/imdb&source=%s' % img_uri))
sickbeard.CACHE_IMAGE_URL_LIST.add_url(img_uri)
filtered.append(dict(
@@ -3133,7 +3201,7 @@ class NewHomeAddShows(Home):
match.group(12)]
img_uri = img_uri.replace(match.group(), ''.join(
[str(y) for x in map(None, parts, scaled) for y in x if y is not None]))
- images = dict(poster=dict(thumb='imagecache?path=imdb&source=%s' % img_uri))
+ images = dict(poster=dict(thumb='imagecache?path=browse/thumb/imdb&source=%s' % img_uri))
sickbeard.CACHE_IMAGE_URL_LIST.add_url(img_uri)
filtered.append(dict(
@@ -3401,7 +3469,7 @@ class NewHomeAddShows(Home):
tmdbid = item.get('show', {}).get('ids', {}).get('tmdb', 0)
tvdbid = item.get('show', {}).get('ids', {}).get('tvdb', 0)
traktid = item.get('show', {}).get('ids', {}).get('trakt', 0)
- images = dict(poster=dict(thumb='imagecache?path=trakt/poster/thumb&filename=%s&tmdbid=%s&tvdbid=%s' %
+ images = dict(poster=dict(thumb='imagecache?path=browse/thumb/trakt&filename=%s&tmdbid=%s&tvdbid=%s' %
('%s.jpg' % traktid, tmdbid, tvdbid)))
filtered.append(dict(
@@ -4631,6 +4699,19 @@ class ConfigGeneral(Config):
def saveRootDirs(self, rootDirString=None):
    """Store the given root directories string into the runtime config setting."""
    sickbeard.ROOT_DIRS = rootDirString
def saveResultPrefs(self, ui_results_sortby=None):
    """Persist the search-result ordering preference.

    ui_results_sortby is either a sort mode ('az', 'date', 'rel') or a
    toggle of whether existing shows are pinned on top ('notop'/'ontop');
    any other value is ignored and nothing is saved.
    """
    if ui_results_sortby in ('az', 'date', 'rel', 'notop', 'ontop'):
        was_ontop = 'notop' not in sickbeard.RESULTS_SORTBY
        if 'top' == ui_results_sortby[-3:]:
            # toggle the on-top flag while keeping the current sort mode
            sortby = sickbeard.RESULTS_SORTBY.replace(' notop', '')
            if not sortby:
                sortby = 'rel'
            flag = ' notop' if was_ontop else ''
            sickbeard.RESULTS_SORTBY = '%s%s' % (sortby, flag)
        else:
            # change the sort mode while keeping the current on-top flag
            flag = '' if was_ontop else ' notop'
            sickbeard.RESULTS_SORTBY = '%s%s' % (ui_results_sortby, flag)

        sickbeard.save_config()
+
def saveAddShowDefaults(self, default_status, any_qualities='', best_qualities='', default_wanted_begin=None,
default_wanted_latest=None, default_flatten_folders=False, default_scene=False,
default_subtitles=False, default_anime=False, default_tag=''):
@@ -6107,26 +6188,33 @@ class CachedImages(MainHandler):
tmdbimage = False
if source is not None and source in sickbeard.CACHE_IMAGE_URL_LIST:
s = source
- if source is None and tmdbid not in [None, 0, '0'] and self.should_try_image(static_image_path, 'tmdb'):
+ if source is None and tmdbid not in [None, 'None', 0, '0'] \
+ and self.should_try_image(static_image_path, 'tmdb'):
tmdbimage = True
try:
tmdbapi = TMDB(sickbeard.TMDB_API_KEY)
tmdbconfig = tmdbapi.Configuration().info()
images = tmdbapi.TV(helpers.tryInt(tmdbid)).images()
- s = '%s%s%s' % (tmdbconfig['images']['base_url'], tmdbconfig['images']['poster_sizes'][3], sorted(images['posters'], key=lambda x: x['vote_average'], reverse=True)[0]['file_path']) if len(images['posters']) > 0 else ''
- except:
+ s = '%s%s%s' % (tmdbconfig['images']['base_url'], tmdbconfig['images']['poster_sizes'][3],
+ sorted(images['posters'], key=lambda x: x['vote_average'],
+ reverse=True)[0]['file_path']) if len(images['posters']) > 0 else ''
+ except (StandardError, Exception):
s = ''
if s and not helpers.download_file(s, static_image_path) and s.find('trakt.us'):
helpers.download_file(s.replace('trakt.us', 'trakt.tv'), static_image_path)
if tmdbimage and not ek.ek(os.path.isfile, static_image_path):
self.create_dummy_image(static_image_path, 'tmdb')
- if source is None and tvdbid not in [None, 0, '0'] and not ek.ek(os.path.isfile, static_image_path) and self.should_try_image(static_image_path, 'tvdb'):
+ if source is None and tvdbid not in [None, 'None', 0, '0'] \
+ and not ek.ek(os.path.isfile, static_image_path) \
+ and self.should_try_image(static_image_path, 'tvdb'):
try:
- r = sickbeard.indexerApi(INDEXER_TVDB).indexer()[helpers.tryInt(tvdbid), False]
+ lINDEXER_API_PARMS = sickbeard.indexerApi(INDEXER_TVDB).api_params.copy()
+ lINDEXER_API_PARMS['posters'] = True
+ r = sickbeard.indexerApi(INDEXER_TVDB).indexer(**lINDEXER_API_PARMS)[helpers.tryInt(tvdbid), False]
if hasattr(r, 'data') and 'poster' in r.data:
s = r.data['poster']
- except:
+ except (StandardError, Exception):
s = ''
if s:
helpers.download_file(s, static_image_path)
@@ -6137,7 +6225,13 @@ class CachedImages(MainHandler):
self.delete_all_dummy_images(static_image_path)
if not ek.ek(os.path.isfile, static_image_path):
- self.redirect('images/trans.png')
+ static_image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'gui', 'slick',
+ 'images', ('image-light.png', 'trans.png')[bool(int(kwargs.get('trans', 1)))])
else:
helpers.set_file_timestamp(static_image_path, min_age=3, new_time=None)
- self.redirect('cache/images/%s/%s' % (path, file_name))
+
+ mime_type, encoding = MimeTypes().guess_type(static_image_path)
+ self.set_header('Content-Type', mime_type)
+ with open(static_image_path, 'rb') as img:
+ return img.read()
+