Change refresh page when torrent providers are enabled/disabled.

Change only display Search Settings/"Usenet retention" if Search NZBs is enabled.
Change update IMDb show parser.
JackDandy 2016-11-20 02:23:37 +00:00
parent 96a144ba6a
commit 1e7c8e5576
4 changed files with 104 additions and 12 deletions

View file

@@ -196,6 +196,8 @@
* Change speed improvement in finding needed categories/qualities (sd, hd, uhd)
* Change add guidance when using the "unknown" quality selection
* Change prevent browser auto completing password fields on config pages
* Change refresh page when torrent providers are enabled/disabled
* Change only display Search Settings/"Usenet retention" if Search NZBs is enabled
[develop changelog]
* Change send nzb data to NZBGet for Anizb instead of url
@@ -237,6 +239,7 @@
* Fix UHD category select in Recent Search
* Change only update enabled torrent providers
* Fix restart when switching from master to develop
* Change update IMDb show parser
### 0.11.16 (2016-10-16 17:30:00 UTC)

View file

@@ -104,11 +104,13 @@
$x.providerType == $GenericProvider.TORRENT and $sickbeard.USE_TORRENTS]
#set $cur_name = $cur_provider.get_id()
#set $cur_url = $cur_provider.url
#set $tip = ($cur_provider.name, 'Site Down')[not $cur_url]
#set $state = ('', ' <span class="red-text">(Site Down?)</span>')[not $cur_url]
#set $bad_url = not $cur_url and cur_provider.is_enabled()
#set $tip = ($cur_provider.name + ('', ' (enable for link)')[not $cur_url and not cur_provider.is_enabled()],
'Site Down')[$bad_url]
#set $state = ('', ' <span class="red-text">(Site Down?)</span>')[$bad_url]
<li class="ui-state-default" id="$cur_name">
<input type="checkbox" id="enable_$cur_name" class="provider_enabler" <%= html_checked if cur_provider.is_enabled() else '' %>/>
<a href="<%= anon_url(cur_url) %>" class="imgLink" rel="noreferrer" onclick="window.open(this.href,'_blank');return false;"><img src="$sbRoot/images/providers/$cur_provider.image_name()" alt="$tip" title="$tip" width="16" height="16" style="vertical-align:middle" /></a>
<a class="imgLink" #if $cur_url#href="<%= anon_url(cur_url) %>" onclick="window.open(this.href,'_blank');return false;"#else#name=""#end if# rel="noreferrer"><img src="$sbRoot/images/providers/$cur_provider.image_name()" alt="$tip" title="$tip" width="16" height="16" style="vertical-align:middle" /></a>
<span style="vertical-align:middle">$cur_provider.name$state</span>
#if $cur_provider.is_public_access() and type($cur_provider).__name__ not in ['TorrentRssProvider']
<span style="font-size:10px;vertical-align:top;font-weight:normal">(PA)</span>

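The template above relies on Cheetah's tuple-index idiom, (value_if_false, value_if_true)[condition], to build the tooltip and status markup. Below is a plain-Python sketch of the same decision for a hypothetical disabled provider that reports no URL; _Provider is a stand-in, not a SickGear class.

# Plain-Python sketch of the new tip/state selection; _Provider is hypothetical.
class _Provider(object):
    name, url = 'SomeTracker', ''      # no url reported by the site
    def is_enabled(self):
        return False                   # provider currently disabled

cur_provider = _Provider()
cur_url = cur_provider.url

# "Site Down" only applies when the provider is enabled yet has no url
bad_url = not cur_url and cur_provider.is_enabled()

# disabled + no url -> hint that enabling the provider restores its site link
tip = (cur_provider.name + ('', ' (enable for link)')[not cur_url and not cur_provider.is_enabled()],
       'Site Down')[bad_url]
state = ('', ' <span class="red-text">(Site Down?)</span>')[bad_url]

print(tip)    # SomeTracker (enable for link)
print(state)  # empty - the red "(Site Down?)" note is reserved for enabled providers

An enabled provider with no URL takes the other branch: tip becomes 'Site Down' and state renders the red "(Site Down?)" note.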
View file

@@ -119,7 +119,7 @@
</span>
</label>
</div>
#if $sickbeard.USE_NZBS
<div class="field-pair">
<label>
<span class="component-title">Usenet retention</span>
@@ -129,7 +129,9 @@
</span>
</label>
</div>
#else
<input type="hidden" name="usenet_retention" value="$sickbeard.USENET_RETENTION" class="form-control input-sm input75">
#end if
<div class="field-pair">
<label>
<span class="component-title">Ignore result with any word</span>

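The new #if/#else keeps the "Usenet retention" field off the page when Search NZBs is disabled, while the hidden input still posts the stored value so a save does not silently reset it. A sketch of why that matters, using a hypothetical handler (not SickGear's actual saveSearch) and an assumed default of 500:

# Hypothetical save handler; illustrates the effect of hiding vs. omitting the field.
def save_search(post, default=500):
    # the posted value (visible or hidden input) wins; if the field were simply
    # not rendered, nothing would be posted and the stored retention would be
    # overwritten with the default
    return int(post.get('usenet_retention') or default)

print(save_search({'usenet_retention': '1200'}))  # 1200 - hidden input preserves the setting
print(save_search({}))                            # 500  - what omitting the field would do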
View file

@@ -44,7 +44,7 @@ from sickbeard import encodingKludge as ek
from sickbeard.providers import newznab, rsstorrent
from sickbeard.common import Quality, Overview, statusStrings, qualityPresetStrings
from sickbeard.common import SNATCHED, UNAIRED, IGNORED, ARCHIVED, WANTED, FAILED, SKIPPED, DOWNLOADED, SNATCHED_BEST, SNATCHED_PROPER
from sickbeard.common import SD, HD720p, HD1080p
from sickbeard.common import SD, HD720p, HD1080p, UHD2160p
from sickbeard.exceptions import ex
from sickbeard.helpers import has_image_ext, remove_article, starify
from sickbeard.indexers.indexer_config import INDEXER_TVDB, INDEXER_TVRAGE
@@ -2752,6 +2752,72 @@ class NewHomeAddShows(Home):
overview = text
return overview
    def parse_imdb(self, data, filtered, kwargs):

        oldest, newest, oldest_dt, newest_dt = None, None, 9999999, 0
        show_list = (data or {}).get('list', {}).get('items', {})
        idx_ids = dict([(x.imdbid, (x.indexer, x.indexerid)) for x in sickbeard.showList if getattr(x, 'imdbid', None)])

        # list_id = (data or {}).get('list', {}).get('id', {})

        for row in show_list:
            row = data.get('titles', {}).get(row.get('const', None), None)
            if not row:
                continue
            try:
                ids = dict(imdb=row.get('id', ''))
                year, ended = 2 * [None]
                if 2 == len(row.get('primary').get('year')):
                    year, ended = row.get('primary').get('year')
                dt_ordinal = 0
                if year:
                    dt = dateutil.parser.parse('01-01-%s' % year)
                    dt_ordinal = dt.toordinal()
                    if dt_ordinal < oldest_dt:
                        oldest_dt = dt_ordinal
                        oldest = year
                    if dt_ordinal > newest_dt:
                        newest_dt = dt_ordinal
                        newest = year

                overview = row.get('plot')
                rating = row.get('ratings', {}).get('rating', 0)
                voting = row.get('ratings', {}).get('votes', 0)
                images = {}
                img_uri = '%s' % row.get('poster', {}).get('url', '')
                if img_uri and 'tv_series.gif' not in img_uri and 'nopicture' not in img_uri:
                    scale = (lambda low1, high1: int((float(450) / high1) * low1))
                    dims = [row.get('poster', {}).get('width', 0), row.get('poster', {}).get('height', 0)]
                    s = [scale(x, int(max(dims))) for x in dims]
                    img_uri = re.sub('(?im)(.*V1_?)(\..*?)$', r'\1UX%s_CR0,0,%s,%s_AL_\2' % (s[0], s[0], s[1]), img_uri)
                    images = dict(poster=dict(thumb='imagecache?path=imdb&source=%s' % img_uri))
                    sickbeard.CACHE_IMAGE_URL_LIST.add_url(img_uri)

                filtered.append(dict(
                    premiered=dt_ordinal,
                    premiered_str=year or 'No year',
                    ended_str=ended or '',
                    when_past=dt_ordinal < datetime.datetime.now().toordinal(), # air time not poss. 16.11.2015
                    genres=', '.join(row.get('metadata', {}).get('genres', {})) or 'No genre yet',
                    ids=ids,
                    images='' if not img_uri else images,
                    overview='No overview yet' if not overview else self.encode_html(overview[:250:]),
                    rating=int(helpers.tryFloat(rating) * 10),
                    title=row.get('primary').get('title'),
                    url_src_db='http://www.imdb.com/%s/' % row.get('primary').get('href').strip('/'),
                    votes=helpers.tryInt(voting, 'TBA')))

                indexer, indexerid = idx_ids.get(ids['imdb'], (None, None))
                src = ((None, 'tvrage')[INDEXER_TVRAGE == indexer], 'tvdb')[INDEXER_TVDB == indexer]
                if src:
                    filtered[-1]['ids'][src] = indexerid
                    filtered[-1]['url_' + src] = '%s%s' % (
                        sickbeard.indexerApi(indexer).config['show_url'], indexerid)
            except (AttributeError, TypeError, KeyError, IndexError):
                pass

        kwargs.update(dict(oldest=oldest, newest=newest))

        return show_list and True or None

    def parse_imdb_html(self, html, filtered, kwargs):

        img_size = re.compile(r'(?im)(V1[^XY]+([XY]))(\d+)([^\d]+)(\d+)([^\d]+)(\d+)([^\d]+)(\d+)([^\d]+)(\d+)(.*?)$')
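A worked example of the poster handling in parse_imdb() above: the reported dimensions are scaled so the longer edge becomes 450px, and the size hints are spliced into the IMDb image URL just after its "V1" marker. The URL and dimensions below are invented for illustration; the rewritten URL is what gets cached and proxied through imagecache?path=imdb&source=...

# Worked example with a made-up poster; mirrors the scale/re.sub step in parse_imdb().
import re

poster = dict(url='https://ia.media-imdb.com/images/M/MV5Bexample._V1_.jpg',  # hypothetical
              width=1000, height=1500)

scale = (lambda low1, high1: int((float(450) / high1) * low1))
dims = [poster['width'], poster['height']]
s = [scale(x, int(max(dims))) for x in dims]        # -> [300, 450]

img_uri = re.sub(r'(?im)(.*V1_?)(\..*?)$',
                 r'\1UX%s_CR0,0,%s,%s_AL_\2' % (s[0], s[0], s[1]), poster['url'])
print(img_uri)  # ...MV5Bexample._V1_UX300_CR0,0,300,450_AL_.jpg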
@@ -2861,12 +2927,18 @@
list_name += ('\'s', '')['your' == list_name.replace('(Off) ', '').lower()]
url = 'http://www.imdb.com/user/ur%s/watchlist' % acc_id
url_data = '/_ajax?sort=date_added,desc&mode=detail&page=1&title_type=tvSeries%2CtvEpisode&ref_=wl_vm_dtl'
url_ui = '?mode=detail&page=1&sort=date_added,desc&title_type=tvSeries%2CtvEpisode&ref_=wl_ref_typ'
html = helpers.getURL(url + url_data, headers={'Accept-Language': 'en-US'})
html = helpers.getURL(url + url_ui, headers={'Accept-Language': 'en-US'})
if html:
show_list_found = self.parse_imdb_html(html, filtered, kwargs)
show_list_found = None
try:
data = json.loads((re.findall(r'(?im)IMDb.*?Initial.*?\.push\((.*)\).*?$', html) or ['{}'])[0])
show_list_found = self.parse_imdb(data, filtered, kwargs)
except (StandardError, Exception):
pass
if not show_list_found:
show_list_found = self.parse_imdb_html(html, filtered, kwargs)
kwargs.update(dict(start_year=start_year))
if len(filtered):
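parse_imdb() consumes the JSON object that the watchlist page pushes into IMDb's client-side "Initial" store, pulled out above with a regular expression before any HTML scraping. The payload shape below is inferred purely from the fields the parser reads; it is illustrative only, not an official IMDb schema.

# Illustrative payload; key names mirror what parse_imdb() looks up.
data = {
    'list': {'items': [{'const': 'tt0000001'}]},
    'titles': {
        'tt0000001': {
            'id': 'tt0000001',
            'primary': {'title': 'Example Show', 'year': ['2015', '2016'],
                        'href': '/title/tt0000001/'},
            'plot': 'Synopsis text ...',
            'ratings': {'rating': 8.1, 'votes': 12345},
            'poster': {'url': 'https://ia.media-imdb.com/images/M/MV5Bexample._V1_.jpg',
                       'width': 1000, 'height': 1500},
            'metadata': {'genres': ['Drama', 'Comedy']},
        },
    },
}
# self.parse_imdb(data, filtered, kwargs) appends one mapped entry to filtered
# and records the oldest/newest premiere years in kwargs.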
@@ -2900,7 +2972,14 @@
url = 'http://www.imdb.com/search/title?at=0&sort=moviemeter&title_type=tv_series&year=%s,%s' % (start_year, end_year)
html = helpers.getURL(url, headers={'Accept-Language': 'en-US'})
if html:
self.parse_imdb_html(html, filtered, kwargs)
show_list_found = None
try:
data = json.loads((re.findall(r'(?im)IMDb.*?Initial.*?\.push\((.*)\).*?$', html) or ['{}'])[0])
show_list_found = self.parse_imdb(data, filtered, kwargs)
except (StandardError, Exception):
pass
if not show_list_found:
self.parse_imdb_html(html, filtered, kwargs)
kwargs.update(dict(mode=mode, periods=periods))
if len(filtered):
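The JSON-first, HTML-fallback sequence is now duplicated at the watchlist and popular-shows call sites. A possible shared helper, not part of this commit and assuming the same imports and methods are in scope, could keep the two in sync:

    # Hypothetical helper (not in this commit) to avoid repeating the fallback logic.
    def _parse_imdb_json_or_html(self, html, filtered, kwargs):
        try:
            data = json.loads((re.findall(r'(?im)IMDb.*?Initial.*?\.push\((.*)\).*?$', html) or ['{}'])[0])
            if self.parse_imdb(data, filtered, kwargs):
                return True
        except (StandardError, Exception):
            pass
        return self.parse_imdb_html(html, filtered, kwargs)

Each "if html:" block would then reduce to a single call, show_list_found = self._parse_imdb_json_or_html(html, filtered, kwargs).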
@@ -4906,6 +4985,7 @@ class ConfigProviders(Config):
# add all the newznab info we have into our list
newznab_sources = dict(zip([x.get_id() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
active_ids = []
reload_page = False
if newznab_string:
for curNewznabProviderStr in newznab_string.split('!!!'):
@@ -4995,8 +5075,10 @@
provider_list.append(src_name)
src_enabled = bool(config.to_int(src_enabled))
if src_name in sources and hasattr(sources[src_name], 'enabled'):
if '' != getattr(sources[src_name], 'enabled', '') and sources[src_name].is_enabled() != src_enabled:
sources[src_name].enabled = src_enabled
if not reload_page and sickbeard.GenericProvider.TORRENT == sources[src_name].providerType:
reload_page = True
if src_name in newznab_sources:
newznab_sources[src_name].enabled = src_enabled
@@ -5091,7 +5173,10 @@
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))
self.redirect('/config/providers/')
if reload_page:
self.write('reload')
else:
self.redirect('/config/providers/')
class ConfigNotifications(Config):
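saveProviders() now answers the save request with the literal string 'reload' instead of redirecting whenever a torrent provider's enabled flag actually changed, presumably so the provider-list markup above (site link versus the "(enable for link)" hint) can be re-rendered; the client-side handling of that response is outside this diff. A condensed sketch of the decision, with assumed names standing in for the surrounding saveProviders() structures:

# Condensed sketch only; 'sources' and 'posted' stand in for the structures
# saveProviders() builds from the form, and 'torrent' for GenericProvider.TORRENT.
def needs_reload(sources, posted):
    """Return True when a torrent provider's enabled state really changed."""
    reload_page = False
    for src_name, src_enabled in posted.items():
        source = sources.get(src_name)
        if source is None or not hasattr(source, 'enabled'):
            continue
        if source.is_enabled() != src_enabled:
            source.enabled = src_enabled
            if not reload_page and 'torrent' == getattr(source, 'providerType', None):
                reload_page = True
    return reload_page

# handler tail: write 'reload' if a change was seen, else redirect to /config/providers/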