Upgraded IMDBpy and improved its performance.

Fixed TVDB Indexer API issues.
This commit is contained in:
echel0n 2014-06-07 08:33:18 -07:00
parent 4784b4619f
commit cfcc35ebcc
6 changed files with 25 additions and 36 deletions


@@ -29,7 +29,7 @@
 [imdbpy]
 ## Default.
-accessSystem = http
+accessSystem = httpThin
 ## Optional (options common to every data access system):
 # Activate adult searches (on, by default).
@@ -69,7 +69,7 @@ accessSystem = http
 ## Set the threshold for logging messages.
 # Can be one of "debug", "info", "warning", "error", "critical" (default:
 # "warning").
-#loggingLevel = debug
+loggingLevel = debug
 ## Path to a configuration file for the logging facility;
 # see: http://docs.python.org/library/logging.html#configuring-logging
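Note: the switch from 'http' to 'httpThin' selects IMDbPY's lighter HTTP data access system, which fetches and parses less data per title and is presumably where most of the speed-up comes from. A minimal sketch of picking an access system from code, equivalent to the accessSystem option above and assuming the bundled IMDbPY build still ships httpThin (the search query is only an example):

# Minimal sketch: select an IMDbPY access system explicitly instead of
# relying on imdbpy.cfg; 'httpThin' trades data completeness for speed.
from imdb import IMDb

ia = IMDb('httpThin')                    # same value as accessSystem above
results = ia.search_movie('The Wire')    # example query, not from the commit
for movie in results[:3]:
    print(movie['title'])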


@@ -598,7 +598,7 @@ class Tvdb:
             zipdata = StringIO.StringIO()
             zipdata.write(resp.content)
             myzipfile = zipfile.ZipFile(zipdata)
-            return xmltodict.parse(myzipfile.read('%s.xml' % language).strip().encode('utf-8'), postprocessor=process)
+            return xmltodict.parse(myzipfile.read('%s.xml' % language), postprocessor=process)
         except zipfile.BadZipfile:
             raise tvdb_error("Bad zip file received from thetvdb.com, could not read it")
     else:
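For context, the change above hands the raw bytes from the zip entry straight to xmltodict.parse instead of stripping and re-encoding them first; xmltodict accepts a byte string and lets the underlying XML parser honour the document's declared encoding. A standalone sketch of the same pattern outside the Tvdb class (the function name, URL handling and error type are placeholders, not part of the commit):

# Standalone sketch of the zipped-XML handling shown above (Python 2,
# matching the surrounding code); requests/xmltodict usage mirrors the diff.
import zipfile
import StringIO

import requests
import xmltodict

def fetch_zipped_xml(url, language='en'):
    resp = requests.get(url)
    zipdata = StringIO.StringIO()
    zipdata.write(resp.content)
    try:
        myzipfile = zipfile.ZipFile(zipdata)
        # pass the raw bytes through; no manual strip()/encode() step
        return xmltodict.parse(myzipfile.read('%s.xml' % language))
    except zipfile.BadZipfile:
        raise ValueError("Bad zip file received, could not read it")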


@@ -179,7 +179,7 @@ for curFile in auto_process_files:
 setup(
     options = {'py2exe': {'bundle_files': 1}},
     zipfile = None,
-    console = ['updater.py'],
+    console = ['updater.py'], requires=['Cheetah']
 )
 if 'test' in oldArgs:
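Worth noting: requires=['Cheetah'] is plain distutils metadata; it records the dependency in the package info but does not install Cheetah by itself. A hedged sketch of how the full call would look in a py2exe build script (assembled for illustration, not copied from the project's real setup.py):

# Illustrative py2exe setup call; 'requires' only declares metadata,
# it does not download or install Cheetah.
from distutils.core import setup
import py2exe  # registers the py2exe command with distutils

setup(
    options={'py2exe': {'bundle_files': 1}},  # bundle everything into the exe
    zipfile=None,                             # no separate library.zip
    console=['updater.py'],                   # console executable to build
    requires=['Cheetah'],                     # declared dependency (metadata only)
)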


@@ -163,9 +163,9 @@ class AllShowsListUI:
                 continue
             if 'seriesname' in curShow:
-                seriesnames.append(curShow['seriesname'].encode('utf-8'))
+                seriesnames.append(curShow['seriesname'])
             if 'aliasnames' in curShow:
-                seriesnames.extend(curShow['aliasnames'].encode('utf-8').split('|'))
+                seriesnames.extend(curShow['aliasnames'].split('|'))
             for name in seriesnames:
                 if searchterm.lower() in name.lower():
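Context for the change above: dropping the .encode('utf-8') calls keeps the series names as unicode objects, so the searchterm.lower() in name.lower() check compares text with text. A small illustration, on a made-up name, of why UTF-8 byte strings behave differently in that check under Python 2:

# -*- coding: utf-8 -*-
# Hypothetical example: case-insensitive matching on unicode vs UTF-8 bytes.
name = u'Örnen'                  # made-up series name, kept as unicode
searchterm = u'örnen'

print(searchterm.lower() in name.lower())    # True: text vs text

# str.lower() only folds ASCII letters, so the same check can miss
# matches once names are encoded to UTF-8 byte strings.
encoded = name.encode('utf-8')
print(searchterm.encode('utf-8').lower() in encoded.lower())  # False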


@@ -52,6 +52,7 @@ from common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, ARCHIVE
 from common import NAMING_DUPLICATE, NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_SEPARATED_REPEAT, \
     NAMING_LIMITED_EXTEND_E_PREFIXED
 class TVShow(object):
     def __init__(self, indexer, indexerid, lang=""):
@@ -891,21 +892,11 @@ class TVShow(object):
                 imdb_info[key] = imdbTv.get(key.replace('_', ' '))
             # Filter only the value
-            if imdb_info['runtimes']:
-                imdb_info['runtimes'] = re.search('\d+', imdb_info['runtimes']).group(0)
-            else:
-                imdb_info['runtimes'] = self.runtime
-            if imdb_info['akas']:
-                imdb_info['akas'] = '|'.join(imdb_info['akas'])
-            else:
-                imdb_info['akas'] = ''
+            imdb_info['runtimes'] = re.search('\d+', imdb_info['runtimes']).group(0) or self.runtime
+            imdb_info['akas'] = '|'.join(imdb_info['akas']) or ''
             # Join all genres in a string
-            if imdb_info['genres']:
-                imdb_info['genres'] = '|'.join(imdb_info['genres'])
-            else:
-                imdb_info['genres'] = ''
+            imdb_info['genres'] = '|'.join(imdb_info['genres']) or ''
             # Get only the production country certificate if any
             if imdb_info['certificates'] and imdb_info['countries']:
@@ -921,11 +912,7 @@
             else:
                 imdb_info['certificates'] = ''
-            if imdb_info['country_codes']:
-                imdb_info['country_codes'] = '|'.join(imdb_info['country_codes'])
-            else:
-                imdb_info['country_codes'] = ''
+            imdb_info['country_codes'] = '|'.join(imdb_info['country_codes']) or ''
             imdb_info['last_update'] = datetime.date.today().toordinal()
             # Rename dict keys without spaces for DB upsert
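The refactor above collapses each if/else pair into a single assignment using Python's or, which returns its left operand when that operand is truthy and the right one otherwise. A short sketch of the idiom and its main caveat (the values are made up):

# The 'value or default' idiom used above.
genres = ['Drama', 'Crime']
empty_genres = []

print('|'.join(genres) or '')         # 'Drama|Crime'
print('|'.join(empty_genres) or '')   # ''  -- empty string is falsy, default used

# Caveat: any falsy value (0, '', [], None) falls through to the default.
runtime = '' or 60                    # hypothetical missing runtime -> 60
print(runtime)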
@@ -1855,7 +1842,8 @@ class TVEpisode(object):
                 "location = ?, file_size = ?, release_name = ?, is_proper = ?, showid = ?, season = ?, episode = ?, "
                 "absolute_number = ? WHERE episode_id = ?",
                 [self.indexerid, self.indexer, self.name, self.description, ",".join([sub for sub in self.subtitles]),
-                 self.subtitles_searchcount, self.subtitles_lastsearch, self.airdate.toordinal(), self.hasnfo, self.hastbn,
+                 self.subtitles_searchcount, self.subtitles_lastsearch, self.airdate.toordinal(), self.hasnfo,
+                 self.hastbn,
                  self.status, self.location, self.file_size, self.release_name, self.is_proper, self.show.indexerid,
                  self.season, self.episode, self.absolute_number, epID]]
         else:
@@ -1863,10 +1851,12 @@
             return [
                 "INSERT OR IGNORE INTO tv_episodes (episode_id, indexerid, indexer, name, description, subtitles, subtitles_searchcount, subtitles_lastsearch, airdate, hasnfo, hastbn, status, location, file_size, release_name, is_proper, showid, season, episode, absolute_number) VALUES "
                 "((SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?),?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);",
-                [self.show.indexerid, self.season, self.episode, self.indexerid, self.indexer, self.name, self.description,
+                [self.show.indexerid, self.season, self.episode, self.indexerid, self.indexer, self.name,
+                 self.description,
                  ",".join([sub for sub in self.subtitles]), self.subtitles_searchcount, self.subtitles_lastsearch,
                  self.airdate.toordinal(), self.hasnfo, self.hastbn, self.status, self.location, self.file_size,
-                 self.release_name, self.is_proper, self.show.indexerid, self.season, self.episode, self.absolute_number]]
+                 self.release_name, self.is_proper, self.show.indexerid, self.season, self.episode,
+                 self.absolute_number]]
     def saveToDB(self, forceSave=False):
         """


@@ -79,7 +79,6 @@ except ImportError:
 from sickbeard import browser
 from lib import adba
 def _handle_reverse_proxy():
     if sickbeard.HANDLE_REVERSE_PROXY:
         cherrypy.lib.cptools.proxy()