Mirror of https://github.com/SickGear/SickGear.git
Fixed subliminal issues.
Added an ignore/required words option that can be set individually for each show. Fixed an issue with global ignore words not properly matching against releases. Fixed issue with
This commit is contained in: parent 9171a28f68, commit c945726f05
28 changed files with 654 additions and 124 deletions
@@ -118,6 +118,17 @@ This <b>DOES NOT</b> allow Sick Beard to download non-english TV episodes!<br />
(check this to have the episode archived after the first best match is found from your archive quality list)
<br />
#end if

<b>Ignored Words:</b> <input type="text" name="rls_ignore_words" id="rls_ignore_words" value="$show.rls_ignore_words" size="50" /><br />
Results with any of these words in the title will be filtered out <br />
Separate words with a comma, e.g. "word1,word2,word3"
<br /><br />

<b>Required Words:</b> <input type="text" name="rls_require_words" id="rls_require_words" value="$show.rls_require_words" size="50" /><br />
Results without one of these words in the title will be filtered out <br />
Separate words with a comma, e.g. "word1,word2,word3"
<br /><br />

<input type="submit" id="submit" value="Submit" class="btn btn-primary" />
</form>
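Both fields take a plain comma-separated word list. Below is a minimal sketch of how such a list can be checked against a release title, using the same word-boundary regex idea as the filter_release_name helper added later in this commit; the function name matches_any_word, the sample title and the word lists are illustrative only and not part of the diff.

import re

def matches_any_word(title, words_csv):
    """Return True if any comma-separated word in words_csv occurs in title as a whole word."""
    for word in (words_csv or '').split(','):
        word = word.strip()
        if word and re.search(r'(^|[\W_])' + re.escape(word) + r'($|[\W_])', title, re.I):
            return True
    return False

# Ignored words reject a result when any of them matches;
# required words reject a result when none of them matches.
title = 'Show.Name.S01E01.720p.HDTV.x264-GRP'
print(matches_any_word(title, 'hdtv,internal'))  # True  -> dropped by "Ignored Words"
print(matches_any_word(title, 'web-dl,bluray'))  # False -> dropped by "Required Words"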
@@ -31,4 +31,4 @@ except ImportError:

__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE',
'MATCHING_CONFIDENCE', 'list_subtitles', 'download_subtitles', 'Pool']
logging.getLogger(__name__).addHandler(NullHandler())
logging.getLogger("subliminal").addHandler(NullHandler())
|
@ -23,7 +23,7 @@ import logging
|
|||
|
||||
|
||||
__all__ = ['list_subtitles', 'download_subtitles']
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
def list_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None):
|
||||
|
@ -94,7 +94,10 @@ def download_subtitles(paths, languages=None, services=None, force=True, multi=F
|
|||
order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]
|
||||
subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
|
||||
for video, subtitles in subtitles_by_video.iteritems():
|
||||
subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
|
||||
try:
|
||||
subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
|
||||
except StopIteration:
|
||||
break
|
||||
results = []
|
||||
service_instances = {}
|
||||
tasks = create_download_tasks(subtitles_by_video, languages, multi)
|
||||
|
|
|
@ -26,7 +26,7 @@ import threading
|
|||
|
||||
|
||||
__all__ = ['Worker', 'Pool']
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class Worker(threading.Thread):
|
||||
|
|
|
@ -27,7 +27,7 @@ except ImportError:
|
|||
|
||||
|
||||
__all__ = ['Cache', 'cachedmethod']
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class Cache(object):
|
||||
|
|
|
@ -31,8 +31,8 @@ import logging
|
|||
__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE', 'MATCHING_CONFIDENCE',
|
||||
'create_list_tasks', 'create_download_tasks', 'consume_task', 'matching_confidence',
|
||||
'key_subtitles', 'group_by_video']
|
||||
logger = logging.getLogger(__name__)
|
||||
SERVICES = ['opensubtitles', 'bierdopje', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles']
|
||||
logger = logging.getLogger("subliminal")
|
||||
SERVICES = ['opensubtitles', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles', 'itasa', 'usub']
|
||||
LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE = range(4)
|
||||
|
||||
|
||||
|
|
|
@ -15,4 +15,4 @@
|
|||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||
__version__ = '0.6.2'
|
||||
__version__ = '0.6.3'
|
||||
|
|
|
@ -20,7 +20,7 @@ import re
|
|||
import logging
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
COUNTRIES = [('AF', 'AFG', '004', u'Afghanistan'),
|
||||
|
@ -619,6 +619,7 @@ LANGUAGES = [('aar', '', 'aa', u'Afar', u'afar'),
|
|||
('pli', '', 'pi', u'Pali', u'pali'),
|
||||
('pol', '', 'pl', u'Polish', u'polonais'),
|
||||
('pon', '', '', u'Pohnpeian', u'pohnpei'),
|
||||
('pob', '', 'pb', u'Brazilian Portuguese', u'brazilian portuguese'),
|
||||
('por', '', 'pt', u'Portuguese', u'portugais'),
|
||||
('pra', '', '', u'Prakrit languages', u'prâkrit, langues'),
|
||||
('pro', '', '', u'Provençal, Old (to 1500)', u'provençal ancien (jusqu\'à 1500)'),
|
||||
|
|
|
@ -27,7 +27,7 @@ import zipfile
|
|||
|
||||
|
||||
__all__ = ['ServiceBase', 'ServiceConfig']
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class ServiceBase(object):
|
||||
|
@ -82,7 +82,7 @@ class ServiceBase(object):
|
|||
"""Initialize connection"""
|
||||
logger.debug(u'Initializing %s' % self.__class__.__name__)
|
||||
self.session = requests.session()
|
||||
self.session.headers.update({'User-Agent': self.user_agent})
|
||||
self.session.headers.update({'User-Agent': self.user_agent})
|
||||
|
||||
def init_cache(self):
|
||||
"""Initialize cache, make sure it is loaded from disk"""
|
||||
|
@ -220,14 +220,16 @@ class ServiceBase(object):
|
|||
# TODO: could check if maybe we already have a text file and
|
||||
# download it directly
|
||||
raise DownloadFailedError('Downloaded file is not a zip file')
|
||||
with zipfile.ZipFile(zippath) as zipsub:
|
||||
for subfile in zipsub.namelist():
|
||||
if os.path.splitext(subfile)[1] in EXTENSIONS:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(zipsub.open(subfile).read())
|
||||
break
|
||||
else:
|
||||
raise DownloadFailedError('No subtitles found in zip file')
|
||||
zipsub = zipfile.ZipFile(zippath)
|
||||
for subfile in zipsub.namelist():
|
||||
if os.path.splitext(subfile)[1] in EXTENSIONS:
|
||||
with open(filepath, 'wb') as f:
|
||||
f.write(zipsub.open(subfile).read())
|
||||
break
|
||||
else:
|
||||
zipsub.close()
|
||||
raise DownloadFailedError('No subtitles found in zip file')
|
||||
zipsub.close()
|
||||
os.remove(zippath)
|
||||
except Exception as e:
|
||||
logger.error(u'Download %s failed: %s' % (url, e))
|
||||
|
|
|
@ -29,16 +29,17 @@ import os
|
|||
import re
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class Addic7ed(ServiceBase):
|
||||
server_url = 'http://www.addic7ed.com'
|
||||
site_url = 'http://www.addic7ed.com'
|
||||
api_based = False
|
||||
#TODO: Complete this
|
||||
languages = language_set(['ar', 'ca', 'de', 'el', 'en', 'es', 'eu', 'fr', 'ga', 'gl', 'he', 'hr', 'hu',
|
||||
'it', 'pl', 'pt', 'ro', 'ru', 'se', 'pt-br'])
|
||||
language_map = {'Portuguese (Brazilian)': Language('por-BR'), 'Greek': Language('gre'),
|
||||
'it', 'pl', 'pt', 'ro', 'ru', 'se', 'pb'])
|
||||
language_map = {'Portuguese (Brazilian)': Language('pob'), 'Greek': Language('gre'),
|
||||
'Spanish (Latin America)': Language('spa'), 'Galego': Language('glg'),
|
||||
u'Català': Language('cat')}
|
||||
videos = [Episode]
|
||||
|
@ -63,6 +64,7 @@ class Addic7ed(ServiceBase):
|
|||
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
|
||||
|
||||
def query(self, filepath, languages, keywords, series, season, episode):
|
||||
|
||||
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
|
||||
self.init_cache()
|
||||
try:
|
||||
|
@@ -90,7 +92,7 @@ class Addic7ed(ServiceBase):
continue
sub_keywords = split_keyword(cells[4].text.strip().lower())
#TODO: Maybe allow empty keywords here? (same in Subtitulos)
if not keywords & sub_keywords:
if keywords and not keywords & sub_keywords:
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
continue
sub_link = '%s/%s' % (self.server_url, cells[9].a['href'])
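The change above relaxes the keyword check: with the old test, an empty keyword set rejected every subtitle, because an intersection with an empty set is always empty; the new test only applies the intersection when keywords were actually extracted. A small illustration of the set semantics, with made-up keyword values:

keywords = set()                      # nothing could be guessed from the video name
sub_keywords = {'lol', 'dimension'}   # keywords parsed from the subtitle listing

# old test: always True when keywords is empty, so every subtitle was skipped
print(not keywords & sub_keywords)                      # True
# new test: an empty keyword set no longer causes a rejection
print(bool(keywords and not keywords & sub_keywords))   # False

keywords = {'killers'}
print(bool(keywords and not keywords & sub_keywords))   # True, a real mismatch still skips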
@ -31,11 +31,12 @@ except ImportError:
|
|||
import pickle
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class BierDopje(ServiceBase):
|
||||
server_url = 'http://api.bierdopje.com/A2B638AC5D804C2E/'
|
||||
site_url = 'http://www.bierdopje.com'
|
||||
user_agent = 'Subliminal/0.6'
|
||||
api_based = True
|
||||
languages = language_set(['eng', 'dut'])
|
||||
|
|
lib/subliminal/services/itasa.py (new file, 216 lines)
@ -0,0 +1,216 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2012 Mr_Orange <mr_orange@hotmail.it>
|
||||
#
|
||||
# This file is part of subliminal.
|
||||
#
|
||||
# subliminal is free software; you can redistribute it and/or modify it under
|
||||
# the terms of the GNU Lesser General Public License as published by
|
||||
# the Free Software Foundation; either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# subliminal is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||
from . import ServiceBase
|
||||
from ..exceptions import DownloadFailedError, ServiceError
|
||||
from ..cache import cachedmethod
|
||||
from ..language import language_set, Language
|
||||
from ..subtitles import get_subtitle_path, ResultSubtitle, EXTENSIONS
|
||||
from ..utils import get_keywords
|
||||
from ..videos import Episode
|
||||
from bs4 import BeautifulSoup
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
import requests
|
||||
import zipfile
|
||||
import StringIO
|
||||
import guessit
|
||||
|
||||
from sickbeard.common import Quality
|
||||
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class Itasa(ServiceBase):
|
||||
server_url = 'http://www.italiansubs.net/'
|
||||
site_url = 'http://www.italiansubs.net/'
|
||||
api_based = False
|
||||
languages = language_set(['it'])
|
||||
videos = [Episode]
|
||||
require_video = False
|
||||
required_features = ['permissive']
|
||||
quality_dict = {Quality.SDTV : '',
|
||||
Quality.SDDVD : 'dvdrip',
|
||||
Quality.RAWHDTV : '1080i',
|
||||
Quality.HDTV : '720p',
|
||||
Quality.FULLHDTV : ('1080p','720p'),
|
||||
Quality.HDWEBDL : 'web-dl',
|
||||
Quality.FULLHDWEBDL : 'web-dl',
|
||||
Quality.HDBLURAY : ('bdrip', 'bluray'),
|
||||
Quality.FULLHDBLURAY : ('bdrip', 'bluray'),
|
||||
Quality.UNKNOWN : 'unknown' #Any subtitle will be downloaded
|
||||
}
|
||||
|
||||
def init(self):
|
||||
|
||||
super(Itasa, self).init()
|
||||
login_pattern = '<input type="hidden" name="return" value="([^\n\r\t ]+?)" /><input type="hidden" name="([^\n\r\t ]+?)" value="([^\n\r\t ]+?)" />'
|
||||
|
||||
response = requests.get(self.server_url + 'index.php')
|
||||
if response.status_code != 200:
|
||||
raise ServiceError('Initiate failed')
|
||||
|
||||
match = re.search(login_pattern, response.content, re.IGNORECASE | re.DOTALL)
|
||||
if not match:
|
||||
raise ServiceError('Can not find unique id parameter on page')
|
||||
|
||||
login_parameter = {'username': 'sickbeard',
|
||||
'passwd': 'subliminal',
|
||||
'remember': 'yes',
|
||||
'Submit': 'Login',
|
||||
'remember': 'yes',
|
||||
'option': 'com_user',
|
||||
'task': 'login',
|
||||
'silent': 'true',
|
||||
'return': match.group(1),
|
||||
match.group(2): match.group(3)
|
||||
}
|
||||
|
||||
self.session = requests.session()
|
||||
r = self.session.post(self.server_url + 'index.php', data=login_parameter)
|
||||
if not re.search('logouticon.png', r.content, re.IGNORECASE | re.DOTALL):
|
||||
raise ServiceError('Itasa Login Failed')
|
||||
|
||||
@cachedmethod
|
||||
def get_series_id(self, name):
|
||||
"""Get the show page and cache every show found in it"""
|
||||
r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=9')
|
||||
soup = BeautifulSoup(r.content, self.required_features)
|
||||
all_series = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
|
||||
for tv_series in all_series.find_all(href=re.compile('func=select')):
|
||||
series_name = tv_series.text.lower().strip().replace(':','')
|
||||
match = re.search('&id=([0-9]+)', tv_series['href'])
|
||||
if match is None:
|
||||
continue
|
||||
series_id = int(match.group(1))
|
||||
self.cache_for(self.get_series_id, args=(series_name,), result=series_id)
|
||||
return self.cached_value(self.get_series_id, args=(name,))
|
||||
|
||||
def get_episode_id(self, series, series_id, season, episode, quality):
|
||||
"""Get the id subtitle for episode with the given quality"""
|
||||
|
||||
season_link = None
|
||||
quality_link = None
|
||||
episode_id = None
|
||||
|
||||
r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=select&id=' + str(series_id))
|
||||
soup = BeautifulSoup(r.content, self.required_features)
|
||||
all_seasons = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
|
||||
for seasons in all_seasons.find_all(href=re.compile('func=select')):
|
||||
if seasons.text.lower().strip() == 'stagione %s' % str(season):
|
||||
season_link = seasons['href']
|
||||
break
|
||||
|
||||
if not season_link:
|
||||
logger.debug(u'Could not find season %s for series %s' % (str(season), series))
|
||||
return None
|
||||
|
||||
r = self.session.get(season_link)
|
||||
soup = BeautifulSoup(r.content, self.required_features)
|
||||
|
||||
all_qualities = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
|
||||
for qualities in all_qualities.find_all(href=re.compile('func=select')):
|
||||
if qualities.text.lower().strip() in self.quality_dict[quality]:
|
||||
quality_link = qualities['href']
|
||||
r = self.session.get(qualities['href'])
|
||||
soup = BeautifulSoup(r.content, self.required_features)
|
||||
break
|
||||
|
||||
#If we want SDTV we are already on the right page, so quality_link will be None
|
||||
if not quality == Quality.SDTV and not quality_link:
|
||||
logger.debug(u'Could not find a subtitle with required quality for series %s season %s' % (series, str(season)))
|
||||
return None
|
||||
|
||||
all_episodes = soup.find('div', attrs = {'id' : 'remositoryfilelisting'})
|
||||
for episodes in all_episodes.find_all(href=re.compile('func=fileinfo')):
|
||||
ep_string = "%(seasonnumber)dx%(episodenumber)02d" % {'seasonnumber': season, 'episodenumber': episode}
|
||||
if re.search(ep_string, episodes.text, re.I) or re.search('completa$', episodes.text, re.I):
|
||||
match = re.search('&id=([0-9]+)', episodes['href'])
|
||||
if match:
|
||||
episode_id = match.group(1)
|
||||
return episode_id
|
||||
|
||||
return episode_id
|
||||
|
||||
def list_checked(self, video, languages):
|
||||
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
|
||||
|
||||
def query(self, filepath, languages, keywords, series, season, episode):
|
||||
|
||||
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
|
||||
self.init_cache()
|
||||
try:
|
||||
series = series.lower().replace('(','').replace(')','')
|
||||
series_id = self.get_series_id(series)
|
||||
except KeyError:
|
||||
logger.debug(u'Could not find series id for %s' % series)
|
||||
return []
|
||||
|
||||
episode_id = self.get_episode_id(series, series_id, season, episode, Quality.nameQuality(filepath))
|
||||
if not episode_id:
|
||||
logger.debug(u'Could not find subtitle for series %s' % series)
|
||||
return []
|
||||
|
||||
r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=fileinfo&id=' + episode_id)
|
||||
soup = BeautifulSoup(r.content)
|
||||
|
||||
sub_link = soup.find('div', attrs = {'id' : 'remositoryfileinfo'}).find(href=re.compile('func=download'))['href']
|
||||
sub_language = self.get_language('it')
|
||||
path = get_subtitle_path(filepath, sub_language, self.config.multi)
|
||||
subtitle = ResultSubtitle(path, sub_language, self.__class__.__name__.lower(), sub_link)
|
||||
|
||||
return [subtitle]
|
||||
|
||||
def download(self, subtitle):
|
||||
|
||||
logger.info(u'Downloading %s in %s' % (subtitle.link, subtitle.path))
|
||||
try:
|
||||
r = self.session.get(subtitle.link, headers={'Referer': self.server_url, 'User-Agent': self.user_agent})
|
||||
zipcontent = StringIO.StringIO(r.content)
|
||||
zipsub = zipfile.ZipFile(zipcontent)
|
||||
|
||||
# if not zipsub.is_zipfile(zipcontent):
|
||||
# raise DownloadFailedError('Downloaded file is not a zip file')
|
||||
|
||||
subfile = ''
|
||||
if len(zipsub.namelist()) == 1:
|
||||
subfile = zipsub.namelist()[0]
|
||||
else:
|
||||
#Season zip: retrieve season and episode numbers from the path
|
||||
guess = guessit.guess_file_info(subtitle.path, 'episode')
|
||||
ep_string = "s%(seasonnumber)02de%(episodenumber)02d" % {'seasonnumber': guess['season'], 'episodenumber': guess['episodeNumber']}
|
||||
for file in zipsub.namelist():
|
||||
if re.search(ep_string, file, re.I):
|
||||
subfile = file
|
||||
break
|
||||
if os.path.splitext(subfile)[1] in EXTENSIONS:
|
||||
with open(subtitle.path, 'wb') as f:
|
||||
f.write(zipsub.open(subfile).read())
|
||||
else:
|
||||
zipsub.close()
|
||||
raise DownloadFailedError('No subtitles found in zip file')
|
||||
|
||||
zipsub.close()
|
||||
except Exception as e:
|
||||
if os.path.exists(subtitle.path):
|
||||
os.remove(subtitle.path)
|
||||
raise DownloadFailedError(str(e))
|
||||
|
||||
logger.debug(u'Download finished')
|
||||
|
||||
Service = Itasa
|
|
@ -27,11 +27,12 @@ import os.path
|
|||
import xmlrpclib
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class OpenSubtitles(ServiceBase):
|
||||
server_url = 'http://api.opensubtitles.org/xml-rpc'
|
||||
site_url = 'http://www.opensubtitles.org'
|
||||
api_based = True
|
||||
# Source: http://www.opensubtitles.org/addons/export_languages.php
|
||||
languages = language_set(['aar', 'abk', 'ace', 'ach', 'ada', 'ady', 'afa', 'afh', 'afr', 'ain', 'aka', 'akk',
|
||||
|
@ -73,9 +74,9 @@ class OpenSubtitles(ServiceBase):
|
|||
'twi', 'tyv', 'udm', 'uga', 'uig', 'ukr', 'umb', 'urd', 'uzb', 'vai', 'ven', 'vie',
|
||||
'vol', 'vot', 'wak', 'wal', 'war', 'was', 'wel', 'wen', 'wln', 'wol', 'xal', 'xho',
|
||||
'yao', 'yap', 'yid', 'yor', 'ypk', 'zap', 'zen', 'zha', 'znd', 'zul', 'zun',
|
||||
'por-BR', 'rum-MD'])
|
||||
language_map = {'mol': Language('rum-MD'), 'scc': Language('srp'), 'pob': Language('por-BR'),
|
||||
Language('rum-MD'): 'mol', Language('srp'): 'scc', Language('por-BR'): 'pob'}
|
||||
'pob', 'rum-MD'])
|
||||
language_map = {'mol': Language('rum-MD'), 'scc': Language('srp'),
|
||||
Language('rum-MD'): 'mol', Language('srp'): 'scc'}
|
||||
language_code = 'alpha3'
|
||||
videos = [Episode, Movie]
|
||||
require_video = False
|
||||
|
|
|
@ -26,20 +26,21 @@ import logging
|
|||
import xmlrpclib
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class Podnapisi(ServiceBase):
|
||||
server_url = 'http://ssp.podnapisi.net:8000'
|
||||
site_url = 'http://www.podnapisi.net'
|
||||
api_based = True
|
||||
languages = language_set(['ar', 'be', 'bg', 'bs', 'ca', 'ca', 'cs', 'da', 'de', 'el', 'en',
|
||||
'es', 'et', 'fa', 'fi', 'fr', 'ga', 'he', 'hi', 'hr', 'hu', 'id',
|
||||
'is', 'it', 'ja', 'ko', 'lt', 'lv', 'mk', 'ms', 'nl', 'nn', 'pl',
|
||||
'pt', 'ro', 'ru', 'sk', 'sl', 'sq', 'sr', 'sv', 'th', 'tr', 'uk',
|
||||
'vi', 'zh', 'es-ar', 'pt-br'])
|
||||
'vi', 'zh', 'es-ar', 'pb'])
|
||||
language_map = {'jp': Language('jpn'), Language('jpn'): 'jp',
|
||||
'gr': Language('gre'), Language('gre'): 'gr',
|
||||
'pb': Language('por-BR'), Language('por-BR'): 'pb',
|
||||
# 'pb': Language('por-BR'), Language('por-BR'): 'pb',
|
||||
'ag': Language('spa-AR'), Language('spa-AR'): 'ag',
|
||||
'cyr': Language('srp')}
|
||||
videos = [Episode, Movie]
|
||||
|
|
lib/subliminal/services/podnapisiweb.py (new file, 124 lines)
@ -0,0 +1,124 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
|
||||
#
|
||||
# This file is part of subliminal.
|
||||
#
|
||||
# subliminal is free software; you can redistribute it and/or modify it under
|
||||
# the terms of the GNU Lesser General Public License as published by
|
||||
# the Free Software Foundation; either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# subliminal is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||
from . import ServiceBase
|
||||
from ..exceptions import DownloadFailedError
|
||||
from ..language import Language, language_set
|
||||
from ..subtitles import ResultSubtitle
|
||||
from ..utils import get_keywords
|
||||
from ..videos import Episode, Movie
|
||||
from bs4 import BeautifulSoup
|
||||
import guessit
|
||||
import logging
|
||||
import re
|
||||
from subliminal.subtitles import get_subtitle_path
|
||||
|
||||
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class PodnapisiWeb(ServiceBase):
|
||||
server_url = 'http://simple.podnapisi.net'
|
||||
site_url = 'http://www.podnapisi.net'
|
||||
api_based = True
|
||||
user_agent = 'Subliminal/0.6'
|
||||
videos = [Episode, Movie]
|
||||
require_video = False
|
||||
required_features = ['xml']
|
||||
languages = language_set(['Albanian', 'Arabic', 'Spanish (Argentina)', 'Belarusian', 'Bosnian', 'Portuguese (Brazil)', 'Bulgarian', 'Catalan',
|
||||
'Chinese', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Estonian', 'Persian',
|
||||
'Finnish', 'French', 'German', 'gre', 'Kalaallisut', 'Hebrew', 'Hindi', 'Hungarian',
|
||||
'Icelandic', 'Indonesian', 'Irish', 'Italian', 'Japanese', 'Kazakh', 'Korean', 'Latvian',
|
||||
'Lithuanian', 'Macedonian', 'Malay', 'Norwegian', 'Polish', 'Portuguese', 'Romanian',
|
||||
'Russian', 'Serbian', 'Sinhala', 'Slovak', 'Slovenian', 'Spanish', 'Swedish', 'Thai',
|
||||
'Turkish', 'Ukrainian', 'Vietnamese'])
|
||||
language_map = {Language('Albanian'): 29, Language('Arabic'): 12, Language('Spanish (Argentina)'): 14, Language('Belarusian'): 50,
|
||||
Language('Bosnian'): 10, Language('Portuguese (Brazil)'): 48, Language('Bulgarian'): 33, Language('Catalan'): 53,
|
||||
Language('Chinese'): 17, Language('Croatian'): 38, Language('Czech'): 7, Language('Danish'): 24,
|
||||
Language('Dutch'): 23, Language('English'): 2, Language('Estonian'): 20, Language('Persian'): 52,
|
||||
Language('Finnish'): 31, Language('French'): 8, Language('German'): 5, Language('gre'): 16,
|
||||
Language('Kalaallisut'): 57, Language('Hebrew'): 22, Language('Hindi'): 42, Language('Hungarian'): 15,
|
||||
Language('Icelandic'): 6, Language('Indonesian'): 54, Language('Irish'): 49, Language('Italian'): 9,
|
||||
Language('Japanese'): 11, Language('Kazakh'): 58, Language('Korean'): 4, Language('Latvian'): 21,
|
||||
Language('Lithuanian'): 19, Language('Macedonian'): 35, Language('Malay'): 55,
|
||||
Language('Norwegian'): 3, Language('Polish'): 26, Language('Portuguese'): 32, Language('Romanian'): 13,
|
||||
Language('Russian'): 27, Language('Serbian'): 36, Language('Sinhala'): 56, Language('Slovak'): 37,
|
||||
Language('Slovenian'): 1, Language('Spanish'): 28, Language('Swedish'): 25, Language('Thai'): 44,
|
||||
Language('Turkish'): 30, Language('Ukrainian'): 46, Language('Vietnamese'): 51,
|
||||
29: Language('Albanian'), 12: Language('Arabic'), 14: Language('Spanish (Argentina)'), 50: Language('Belarusian'),
|
||||
10: Language('Bosnian'), 48: Language('Portuguese (Brazil)'), 33: Language('Bulgarian'), 53: Language('Catalan'),
|
||||
17: Language('Chinese'), 38: Language('Croatian'), 7: Language('Czech'), 24: Language('Danish'),
|
||||
23: Language('Dutch'), 2: Language('English'), 20: Language('Estonian'), 52: Language('Persian'),
|
||||
31: Language('Finnish'), 8: Language('French'), 5: Language('German'), 16: Language('gre'),
|
||||
57: Language('Kalaallisut'), 22: Language('Hebrew'), 42: Language('Hindi'), 15: Language('Hungarian'),
|
||||
6: Language('Icelandic'), 54: Language('Indonesian'), 49: Language('Irish'), 9: Language('Italian'),
|
||||
11: Language('Japanese'), 58: Language('Kazakh'), 4: Language('Korean'), 21: Language('Latvian'),
|
||||
19: Language('Lithuanian'), 35: Language('Macedonian'), 55: Language('Malay'), 40: Language('Chinese'),
|
||||
3: Language('Norwegian'), 26: Language('Polish'), 32: Language('Portuguese'), 13: Language('Romanian'),
|
||||
27: Language('Russian'), 36: Language('Serbian'), 47: Language('Serbian'), 56: Language('Sinhala'),
|
||||
37: Language('Slovak'), 1: Language('Slovenian'), 28: Language('Spanish'), 25: Language('Swedish'),
|
||||
44: Language('Thai'), 30: Language('Turkish'), 46: Language('Ukrainian'), Language('Vietnamese'): 51}
|
||||
|
||||
def list_checked(self, video, languages):
|
||||
if isinstance(video, Movie):
|
||||
return self.query(video.path or video.release, languages, video.title, year=video.year,
|
||||
keywords=get_keywords(video.guess))
|
||||
if isinstance(video, Episode):
|
||||
return self.query(video.path or video.release, languages, video.series, season=video.season,
|
||||
episode=video.episode, keywords=get_keywords(video.guess))
|
||||
|
||||
def query(self, filepath, languages, title, season=None, episode=None, year=None, keywords=None):
|
||||
params = {'sXML': 1, 'sK': title, 'sJ': ','.join([str(self.get_code(l)) for l in languages])}
|
||||
if season is not None:
|
||||
params['sTS'] = season
|
||||
if episode is not None:
|
||||
params['sTE'] = episode
|
||||
if year is not None:
|
||||
params['sY'] = year
|
||||
if keywords is not None:
|
||||
params['sR'] = keywords
|
||||
r = self.session.get(self.server_url + '/ppodnapisi/search', params=params)
|
||||
if r.status_code != 200:
|
||||
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
|
||||
return []
|
||||
subtitles = []
|
||||
soup = BeautifulSoup(r.content, self.required_features)
|
||||
for sub in soup('subtitle'):
|
||||
if 'n' in sub.flags:
|
||||
logger.debug(u'Skipping hearing impaired')
|
||||
continue
|
||||
language = self.get_language(sub.languageId.text)
|
||||
confidence = float(sub.rating.text) / 5.0
|
||||
sub_keywords = set()
|
||||
for release in sub.release.text.split():
|
||||
sub_keywords |= get_keywords(guessit.guess_file_info(release + '.srt', 'autodetect'))
|
||||
sub_path = get_subtitle_path(filepath, language, self.config.multi)
|
||||
subtitle = ResultSubtitle(sub_path, language, self.__class__.__name__.lower(),
|
||||
sub.url.text, confidence=confidence, keywords=sub_keywords)
|
||||
subtitles.append(subtitle)
|
||||
return subtitles
|
||||
|
||||
def download(self, subtitle):
|
||||
r = self.session.get(subtitle.link)
|
||||
if r.status_code != 200:
|
||||
raise DownloadFailedError()
|
||||
soup = BeautifulSoup(r.content)
|
||||
self.download_zip_file(self.server_url + soup.find('a', href=re.compile('download'))['href'], subtitle.path)
|
||||
return subtitle
|
||||
|
||||
|
||||
Service = PodnapisiWeb
|
|
@ -26,15 +26,16 @@ import logging
|
|||
import urllib
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class SubsWiki(ServiceBase):
|
||||
server_url = 'http://www.subswiki.com'
|
||||
site_url = 'http://www.subswiki.com'
|
||||
api_based = False
|
||||
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
|
||||
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'pob', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
|
||||
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'),
|
||||
u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'),
|
||||
u'Català': Language('cat'), u'Brazilian': Language('pob'), u'English (US)': Language('eng-US'),
|
||||
u'English (UK)': Language('eng-GB')}
|
||||
language_code = 'name'
|
||||
videos = [Episode, Movie]
|
||||
|
@ -77,7 +78,7 @@ class SubsWiki(ServiceBase):
|
|||
subtitles = []
|
||||
for sub in soup('td', {'class': 'NewsTitle'}):
|
||||
sub_keywords = split_keyword(sub.b.string.lower())
|
||||
if not keywords & sub_keywords:
|
||||
if keywords and not keywords & sub_keywords:
|
||||
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
|
||||
continue
|
||||
for html_language in sub.parent.parent.find_all('td', {'class': 'language'}):
|
||||
|
|
|
@ -27,15 +27,16 @@ import unicodedata
|
|||
import urllib
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class Subtitulos(ServiceBase):
|
||||
server_url = 'http://www.subtitulos.es'
|
||||
site_url = 'http://www.subtitulos.es'
|
||||
api_based = False
|
||||
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
|
||||
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'),
|
||||
u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'),
|
||||
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'pob', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
|
||||
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), #u'Español (Latinoamérica)': Language('spa'),
|
||||
u'Català': Language('cat'), u'Brazilian': Language('pob'), u'English (US)': Language('eng-US'),
|
||||
u'English (UK)': Language('eng-GB'), 'Galego': Language('glg')}
|
||||
language_code = 'name'
|
||||
videos = [Episode]
|
||||
|
@ -45,12 +46,13 @@ class Subtitulos(ServiceBase):
|
|||
# and the 'ó' char directly. This is because now BS4 converts the html
|
||||
# code chars into their equivalent unicode char
|
||||
release_pattern = re.compile('Versi.+n (.+) ([0-9]+).([0-9])+ megabytes')
|
||||
|
||||
extra_keywords_pattern = re.compile("(?:con|para)\s(?:720p)?(?:\-|\s)?([A-Za-z]+)(?:\-|\s)?(?:720p)?(?:\s|\.)(?:y\s)?(?:720p)?(?:\-\s)?([A-Za-z]+)?(?:\-\s)?(?:720p)?(?:\.)?");
|
||||
|
||||
def list_checked(self, video, languages):
|
||||
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
|
||||
|
||||
def query(self, filepath, languages, keywords, series, season, episode):
|
||||
request_series = series.lower().replace(' ', '_')
|
||||
request_series = series.lower().replace(' ', '-').replace('&', '@').replace('(','').replace(')','')
|
||||
if isinstance(request_series, unicode):
|
||||
request_series = unicodedata.normalize('NFKD', request_series).encode('ascii', 'ignore')
|
||||
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
|
||||
|
@ -65,7 +67,7 @@ class Subtitulos(ServiceBase):
|
|||
subtitles = []
|
||||
for sub in soup('div', {'id': 'version'}):
|
||||
sub_keywords = split_keyword(self.release_pattern.search(sub.find('p', {'class': 'title-sub'}).contents[1]).group(1).lower())
|
||||
if not keywords & sub_keywords:
|
||||
if keywords and not keywords & sub_keywords:
|
||||
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
|
||||
continue
|
||||
for html_language in sub.findAllNext('ul', {'class': 'sslist'}):
|
||||
|
|
|
@ -16,22 +16,23 @@
|
|||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||
from . import ServiceBase
|
||||
from ..language import language_set
|
||||
from ..language import language_set, Language
|
||||
from ..subtitles import get_subtitle_path, ResultSubtitle
|
||||
from ..videos import Episode, Movie, UnknownVideo
|
||||
import logging
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
class TheSubDB(ServiceBase):
|
||||
server_url = 'http://api.thesubdb.com'
|
||||
site_url = 'http://www.thesubdb.com/'
|
||||
user_agent = 'SubDB/1.0 (subliminal/0.6; https://github.com/Diaoul/subliminal)'
|
||||
api_based = True
|
||||
# Source: http://api.thesubdb.com/?action=languages
|
||||
languages = language_set(['af', 'cs', 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'id', 'it',
|
||||
'la', 'nl', 'no', 'oc', 'pl', 'pt', 'ro', 'ru', 'sl', 'sr', 'sv',
|
||||
'la', 'nl', 'no', 'oc', 'pl', 'pb', 'ro', 'ru', 'sl', 'sr', 'sv',
|
||||
'tr'])
|
||||
videos = [Movie, Episode, UnknownVideo]
|
||||
require_video = True
|
||||
|
@ -48,6 +49,10 @@ class TheSubDB(ServiceBase):
|
|||
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
|
||||
return []
|
||||
available_languages = language_set(r.content.split(','))
|
||||
#This is needed because for TheSubDB the 'pt' language is Brazilian Portuguese, not Portuguese,
#so we remove the 'pt' language and add 'pb' instead
|
||||
if Language('pt') in available_languages:
|
||||
available_languages = available_languages - language_set(['pt']) | language_set(['pb'])
|
||||
languages &= available_languages
|
||||
if not languages:
|
||||
logger.debug(u'Could not find subtitles for hash %s with languages %r (only %r available)' % (moviehash, languages, available_languages))
|
||||
|
|
|
@ -26,7 +26,7 @@ import logging
|
|||
import re
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
|
||||
def match(pattern, string):
|
||||
|
@ -39,13 +39,14 @@ def match(pattern, string):
|
|||
|
||||
class TvSubtitles(ServiceBase):
|
||||
server_url = 'http://www.tvsubtitles.net'
|
||||
site_url = 'http://www.tvsubtitles.net'
|
||||
api_based = False
|
||||
languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu',
|
||||
'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk',
|
||||
'zh', 'pt-br'])
|
||||
'zh', 'pb'])
|
||||
#TODO: Find more exceptions
|
||||
language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'),
|
||||
'cn': Language('chi')}
|
||||
'cn': Language('chi'), 'br': Language('pob')}
|
||||
videos = [Episode]
|
||||
require_video = False
|
||||
required_features = ['permissive']
|
||||
|
|
lib/subliminal/services/usub.py (new file, 99 lines)
@ -0,0 +1,99 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2013 Julien Goret <jgoret@gmail.com>
|
||||
#
|
||||
# This file is part of subliminal.
|
||||
#
|
||||
# subliminal is free software; you can redistribute it and/or modify it under
|
||||
# the terms of the GNU Lesser General Public License as published by
|
||||
# the Free Software Foundation; either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# subliminal is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||
from . import ServiceBase
|
||||
from ..exceptions import ServiceError
|
||||
from ..language import language_set, Language
|
||||
from ..subtitles import get_subtitle_path, ResultSubtitle
|
||||
from ..utils import get_keywords, split_keyword
|
||||
from ..videos import Episode
|
||||
from bs4 import BeautifulSoup
|
||||
import logging
|
||||
import urllib
|
||||
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
class Usub(ServiceBase):
|
||||
server_url = 'http://www.u-sub.net/sous-titres'
|
||||
site_url = 'http://www.u-sub.net/'
|
||||
api_based = False
|
||||
languages = language_set(['fr'])
|
||||
videos = [Episode]
|
||||
require_video = False
|
||||
#required_features = ['permissive']
|
||||
|
||||
def list_checked(self, video, languages):
|
||||
return self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode)
|
||||
|
||||
def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None):
|
||||
|
||||
## Check that we really got information about our episode
|
||||
if series and season and episode:
|
||||
request_series = series.lower().replace(' ', '-')
|
||||
if isinstance(request_series, unicode):
|
||||
request_series = request_series.encode('utf-8')
|
||||
logger.debug(u'Getting subtitles for %s season %d episode %d with language %r' % (series, season, episode, languages))
|
||||
r = self.session.get('%s/%s/saison_%s' % (self.server_url, urllib.quote(request_series),season))
|
||||
if r.status_code == 404:
|
||||
print "Error 404"
|
||||
logger.debug(u'Could not find subtitles for %s' % (series))
|
||||
return []
|
||||
else:
|
||||
print "One or more parameter missing"
|
||||
raise ServiceError('One or more parameter missing')
|
||||
|
||||
## Check that we didn't get a big and nasty HTTP error
|
||||
if r.status_code != 200:
|
||||
print u'Request %s returned status code %d' % (r.url, r.status_code)
|
||||
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
|
||||
return []
|
||||
|
||||
## Edit the episode information so we can use it in our search
|
||||
if episode < 10 :
|
||||
episode_num='0'+str(episode)
|
||||
else :
|
||||
episode_num=str(episode)
|
||||
season_num = str(season)
|
||||
series_name = series.lower().replace(' ', '.')
|
||||
possible_episode_naming = [season_num+'x'+episode_num,season_num+episode_num]
|
||||
|
||||
|
||||
## Actually parsing the page for the good subtitles
|
||||
soup = BeautifulSoup(r.content, self.required_features)
|
||||
subtitles = []
|
||||
subtitles_list = soup.find('table', {'id' : 'subtitles_list'})
|
||||
link_list = subtitles_list.findAll('a', {'class' : 'dl_link'})
|
||||
|
||||
for link in link_list :
|
||||
link_url = link.get('href')
|
||||
splited_link = link_url.split('/')
|
||||
filename = splited_link[len(splited_link)-1]
|
||||
for episode_naming in possible_episode_naming :
|
||||
if episode_naming in filename :
|
||||
for language in languages:
|
||||
path = get_subtitle_path(filepath, language, self.config.multi)
|
||||
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s' % (link_url))
|
||||
subtitles.append(subtitle)
|
||||
return subtitles
|
||||
|
||||
def download(self, subtitle):
|
||||
## All downloaded files are zip files
|
||||
self.download_zip_file(subtitle.link, subtitle.path)
|
||||
return subtitle
|
||||
|
||||
|
||||
Service = Usub
|
|
@ -26,10 +26,13 @@ import mimetypes
|
|||
import os
|
||||
import struct
|
||||
|
||||
from sickbeard import encodingKludge as ek
|
||||
import sickbeard
|
||||
|
||||
|
||||
__all__ = ['EXTENSIONS', 'MIMETYPES', 'Video', 'Episode', 'Movie', 'UnknownVideo',
|
||||
'scan', 'hash_opensubtitles', 'hash_thesubdb']
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger("subliminal")
|
||||
|
||||
#: Video extensions
|
||||
EXTENSIONS = ['.avi', '.mkv', '.mpg', '.mp4', '.m4v', '.mov', '.ogm', '.ogv', '.wmv',
|
||||
|
@ -55,6 +58,10 @@ class Video(object):
|
|||
self.imdbid = imdbid
|
||||
self._path = None
|
||||
self.hashes = {}
|
||||
|
||||
if isinstance(path, unicode):
|
||||
path = path.encode('utf-8')
|
||||
|
||||
if os.path.exists(path):
|
||||
self._path = path
|
||||
self.size = os.path.getsize(self._path)
|
||||
|
@ -138,6 +145,10 @@ class Video(object):
|
|||
if folder == '':
|
||||
folder = '.'
|
||||
existing = [f for f in os.listdir(folder) if f.startswith(basename)]
|
||||
if sickbeard.SUBTITLES_DIR:
|
||||
subsDir = ek.ek(os.path.join, folder, sickbeard.SUBTITLES_DIR)
|
||||
if ek.ek(os.path.isdir, subsDir):
|
||||
existing.extend([f for f in os.listdir(subsDir) if f.startswith(basename)])
|
||||
for path in existing:
|
||||
for ext in subtitles.EXTENSIONS:
|
||||
if path.endswith(ext):
|
||||
|
@ -214,6 +225,9 @@ def scan(entry, max_depth=3, scan_filter=None, depth=0):
|
|||
:rtype: list of (:class:`Video`, [:class:`~subliminal.subtitles.Subtitle`])
|
||||
|
||||
"""
|
||||
if isinstance(entry, unicode):
|
||||
entry = entry.encode('utf-8')
|
||||
|
||||
if depth > max_depth and max_depth != 0: # we do not want to search the whole file system except if max_depth = 0
|
||||
return []
|
||||
if os.path.isdir(entry): # a dir? recurse
|
||||
|
|
|
@ -27,7 +27,7 @@ from sickbeard import encodingKludge as ek
|
|||
from sickbeard.name_parser.parser import NameParser, InvalidNameException
|
||||
|
||||
MIN_DB_VERSION = 9 # oldest db version we support migrating from
|
||||
MAX_DB_VERSION = 28
|
||||
MAX_DB_VERSION = 29
|
||||
|
||||
|
||||
class MainSanityCheck(db.DBSanityCheck):
|
||||
|
@ -538,10 +538,14 @@ class AddProperSearch(AddUpdateTVDB):
|
|||
|
||||
class AddDvdOrderOption(AddProperSearch):
|
||||
def test(self):
|
||||
return self.hasColumn("tv_shows", "dvdorder")
|
||||
return self.checkDBVersion() >= 20
|
||||
|
||||
def execute(self):
|
||||
self.connection.action("ALTER TABLE tv_shows ADD COLUMN dvdorder NUMERIC DEFAULT 0")
|
||||
backupDatabase(20)
|
||||
|
||||
logger.log(u"Adding column dvdorder to tvshows")
|
||||
if not self.hasColumn("tv_shows", "dvdorder"):
|
||||
self.addColumn("tv_shows", "dvdorder", "NUMERIC", "0")
|
||||
|
||||
self.incDBVersion()
|
||||
|
||||
|
@ -552,6 +556,10 @@ class ConvertTVShowsToIndexerScheme(AddDvdOrderOption):
|
|||
def execute(self):
|
||||
backupDatabase(22)
|
||||
|
||||
logger.log(u"Adding column dvdorder to tvshows")
|
||||
if not self.hasColumn("tv_shows", "dvdorder"):
|
||||
self.addColumn("tv_shows", "dvdorder", "NUMERIC", "0")
|
||||
|
||||
logger.log(u"Converting TV Shows table to Indexer Scheme...")
|
||||
|
||||
if self.hasTable("tmp_tv_shows"):
|
||||
|
@ -657,7 +665,9 @@ class AddArchiveFirstMatchOption(ConvertInfoToIndexerScheme):
|
|||
def execute(self):
|
||||
backupDatabase(26)
|
||||
|
||||
self.connection.action("ALTER TABLE tv_shows ADD COLUMN archive_firstmatch NUMERIC DEFAULT 0")
|
||||
logger.log(u"Adding column archive_firstmatch to tvshows")
|
||||
if not self.hasColumn("tv_shows", "archive_firstmatch"):
|
||||
self.addColumn("tv_shows", "archive_firstmatch", "NUMERIC", "0")
|
||||
|
||||
self.incDBVersion()
|
||||
|
||||
|
@ -696,4 +706,23 @@ class ConvertIndexerToInteger(AddSceneNumbering):
|
|||
|
||||
self.connection.mass_action(ql)
|
||||
|
||||
self.incDBVersion()
|
||||
self.incDBVersion()
|
||||
|
||||
class AddRequireAndIgnoreWords(ConvertIndexerToInteger):
|
||||
""" Adding column rls_require_words and rls_ignore_words to tv_shows """
|
||||
|
||||
def test(self):
|
||||
return self.checkDBVersion() >= 29
|
||||
|
||||
def execute(self):
|
||||
backupDatabase(29)
|
||||
|
||||
logger.log(u"Adding column rls_require_words to tvshows")
|
||||
if not self.hasColumn("tv_shows", "rls_require_words"):
|
||||
self.addColumn("tv_shows", "rls_require_words", "TEXT", "")
|
||||
|
||||
logger.log(u"Adding column rls_ignore_words to tvshows")
|
||||
if not self.hasColumn("tv_shows", "rls_ignore_words"):
|
||||
self.addColumn("tv_shows", "rls_ignore_words", "TEXT", "")
|
||||
|
||||
self.incDBVersion()
|
||||
|
|
|
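For reference, here is a standalone sketch of what this migration amounts to at the SQL level; the in-memory database and the minimal tv_shows layout are stand-ins for illustration only, while the real migration goes through addColumn() and incDBVersion() exactly as shown above.

import sqlite3

conn = sqlite3.connect(':memory:')
# minimal stand-in for the real tv_shows schema, which is not part of this diff
conn.execute('CREATE TABLE tv_shows (indexer_id INTEGER PRIMARY KEY, show_name TEXT)')
# the two new per-show columns, both defaulting to an empty string
conn.execute("ALTER TABLE tv_shows ADD COLUMN rls_require_words TEXT DEFAULT ''")
conn.execute("ALTER TABLE tv_shows ADD COLUMN rls_ignore_words TEXT DEFAULT ''")
conn.commit()
# incDBVersion() then records schema version 29, matching MAX_DB_VERSION above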
@ -161,13 +161,23 @@ class ProperFinder():
|
|||
logger.DEBUG)
|
||||
continue
|
||||
|
||||
showObj = helpers.findCertainShow(sickbeard.showList, curProper.indexerid)
|
||||
if not showObj:
|
||||
logger.log(u"Unable to find the show with indexerID " + str(curProper.indexerid), logger.ERROR)
|
||||
continue
|
||||
|
||||
if showObj.rls_ignore_words and search.filter_release_name(curProper.name, showObj.rls_ignore_words):
|
||||
logger.log(u"Ignoring " + curProper.name + " based on ignored words filter: " + showObj.rls_ignore_words,
|
||||
logger.MESSAGE)
|
||||
continue
|
||||
|
||||
if showObj.rls_require_words and not search.filter_release_name(curProper.name, showObj.rls_require_words):
|
||||
logger.log(u"Ignoring " + curProper.name + " based on required words filter: " + showObj.rls_require_words,
|
||||
logger.MESSAGE)
|
||||
continue
|
||||
|
||||
# if we have an air-by-date show then get the real season/episode numbers
|
||||
if curProper.season == -1 and curProper.indexerid:
|
||||
showObj = helpers.findCertainShow(sickbeard.showList, curProper.indexerid)
|
||||
if not showObj:
|
||||
logger.log(u"This should never have happened, post a bug about this!", logger.ERROR)
|
||||
raise Exception("BAD STUFF HAPPENED")
|
||||
|
||||
indexer_lang = showObj.lang
|
||||
lINDEXER_API_PARMS = sickbeard.indexerApi(showObj.indexer).api_params.copy()
|
||||
if indexer_lang and not indexer_lang == 'en':
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
from __future__ import with_statement
|
||||
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
import datetime
|
||||
|
||||
|
@ -212,7 +213,7 @@ def searchForNeededEpisodes():
|
|||
if not bestResult or bestResult.quality < curResult.quality:
|
||||
bestResult = curResult
|
||||
|
||||
bestResult = pickBestResult(curFoundResults[curEp])
|
||||
bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
|
||||
|
||||
# if all results were rejected move on to the next episode
|
||||
if not bestResult:
|
||||
|
@@ -231,8 +232,27 @@ def searchForNeededEpisodes():

return foundResults.values()

def filter_release_name(name, filter_words):
"""
Filters out results based on filter_words

def pickBestResult(results, quality_list=None):
name: name to check
filter_words : Words to filter on, separated by comma

Returns: False if the release name is OK, True if it contains one of the filter_words
"""
if filter_words:
for test_word in filter_words.split(','):
test_word = test_word.strip()

if test_word:
if re.search('(^|[\W_]|[\s_])' + test_word + '($|[\W_]|[\s_])', name, re.I):
logger.log(u"" + name + " contains word: " + test_word, logger.DEBUG)
return True

return False

def pickBestResult(results, show, quality_list=None):
logger.log(u"Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)

# find the best result for the current episode
@ -244,6 +264,16 @@ def pickBestResult(results, quality_list=None):
|
|||
logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
|
||||
continue
|
||||
|
||||
if show.rls_ignore_words and filter_release_name(cur_result.name, show.rls_ignore_words):
|
||||
logger.log(u"Ignoring " + cur_result.name + " based on ignored words filter: " + show.rls_ignore_words,
|
||||
logger.MESSAGE)
|
||||
continue
|
||||
|
||||
if show.rls_require_words and not filter_release_name(cur_result.name, show.rls_require_words):
|
||||
logger.log(u"Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words,
|
||||
logger.MESSAGE)
|
||||
continue
|
||||
|
||||
if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
|
||||
cur_result.provider.name):
|
||||
logger.log(cur_result.name + u" has previously failed, rejecting it")
|
||||
|
@ -371,7 +401,7 @@ def findEpisode(episode, manualSearch=False):
|
|||
logger.log(u"No NZB/Torrent providers found or enabled in the sickbeard config. Please check your settings.",
|
||||
logger.ERROR)
|
||||
|
||||
bestResult = pickBestResult(foundResults)
|
||||
bestResult = pickBestResult(foundResults, episode.show)
|
||||
|
||||
return bestResult
|
||||
|
||||
|
@ -426,7 +456,7 @@ def findSeason(show, season):
|
|||
# pick the best season NZB
|
||||
bestSeasonNZB = None
|
||||
if SEASON_RESULT in foundResults:
|
||||
bestSeasonNZB = pickBestResult(foundResults[SEASON_RESULT], anyQualities + bestQualities)
|
||||
bestSeasonNZB = pickBestResult(foundResults[SEASON_RESULT], show, anyQualities + bestQualities)
|
||||
|
||||
highest_quality_overall = 0
|
||||
for cur_season in foundResults:
|
||||
|
@ -595,6 +625,6 @@ def findSeason(show, season):
|
|||
if len(foundResults[curEp]) == 0:
|
||||
continue
|
||||
|
||||
finalResults.append(pickBestResult(foundResults[curEp]))
|
||||
finalResults.append(pickBestResult(foundResults[curEp], show))
|
||||
|
||||
return finalResults
|
||||
|
|
|
@@ -69,9 +69,10 @@ def filterBadReleases(name):
# return True

# if any of the bad strings are in the name then say no
for x in resultFilters + sickbeard.IGNORE_WORDS.split(','):
if re.search('(^|[\W_]|[\s_])' + x.strip() + '($|[\W_]|[\s_])', name, re.I):
logger.log(u"Invalid scene release: " + name + " contains " + x + ", ignoring it", logger.DEBUG)
for ignore_word in resultFilters + sickbeard.IGNORE_WORDS.split(','):
ignore_word = ignore_word.strip()
if re.search('(^|[\W_]|[\s_])' + ignore_word + '($|[\W_]|[\s_])', name, re.I):
logger.log(u"Invalid scene release: " + name + " contains " + ignore_word + ", ignoring it", logger.DEBUG)
return False

return True
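Both the old and the rewritten loop strip each ignore word before splicing it into the word-boundary pattern; the rewrite mainly hoists the strip so the logged word is the cleaned one. Stripping matters because splitting a comma-separated setting leaves a leading space on every entry after the first. A small illustration with made-up data:

import re

name = 'Show.Name.S01E01.German.720p.HDTV-GRP'
raw_words = 'nlsub, german'.split(',')   # -> ['nlsub', ' german']

for word in raw_words:
    pattern = r'(^|[\W_]|[\s_])' + word.strip() + r'($|[\W_]|[\s_])'
    print('%s -> %s' % (word.strip(), bool(re.search(pattern, name, re.I))))
# nlsub -> False
# german -> True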
@ -28,10 +28,8 @@ from sickbeard import history
|
|||
from lib import subliminal
|
||||
|
||||
SINGLE = 'und'
|
||||
|
||||
|
||||
def sortedServiceList():
|
||||
servicesMapping = dict([(x.lower(), x) for x in subliminal.Subtitle.core.Providers])
|
||||
servicesMapping = dict([(x.lower(), x) for x in subliminal.core.SERVICES])
|
||||
|
||||
newList = []
|
||||
|
||||
|
@ -39,50 +37,33 @@ def sortedServiceList():
|
|||
curIndex = 0
|
||||
for curService in sickbeard.SUBTITLES_SERVICES_LIST:
|
||||
if curService in servicesMapping:
|
||||
curServiceDict = {'id': curService, 'image': curService + '.png', 'name': servicesMapping[curService],
|
||||
'enabled': sickbeard.SUBTITLES_SERVICES_ENABLED[curIndex] == 1,
|
||||
'api_based': __import__('lib.subliminal.services.' + curService, globals=globals(),
|
||||
locals=locals(), fromlist=['Service'],
|
||||
level=-1).Service.api_based,
|
||||
'url': __import__('lib.subliminal.services.' + curService, globals=globals(),
|
||||
locals=locals(), fromlist=['Service'], level=-1).Service.site_url}
|
||||
curServiceDict = {'id': curService, 'image': curService+'.png', 'name': servicesMapping[curService], 'enabled': sickbeard.SUBTITLES_SERVICES_ENABLED[curIndex] == 1, 'api_based': __import__('lib.subliminal.services.' + curService, globals=globals(), locals=locals(), fromlist=['Service'], level=-1).Service.api_based, 'url': __import__('lib.subliminal.services.' + curService, globals=globals(), locals=locals(), fromlist=['Service'], level=-1).Service.site_url}
|
||||
newList.append(curServiceDict)
|
||||
curIndex += 1
|
||||
|
||||
# add any services that are missing from that list
|
||||
for curService in servicesMapping.keys():
|
||||
if curService not in [x['id'] for x in newList]:
|
||||
curServiceDict = {'id': curService, 'image': curService + '.png', 'name': servicesMapping[curService],
|
||||
'enabled': False,
|
||||
'api_based': __import__('lib.subliminal.services.' + curService, globals=globals(),
|
||||
locals=locals(), fromlist=['Service'],
|
||||
level=-1).Service.api_based,
|
||||
'url': __import__('lib.subliminal.services.' + curService, globals=globals(),
|
||||
locals=locals(), fromlist=['Service'], level=-1).Service.site_url}
|
||||
curServiceDict = {'id': curService, 'image': curService+'.png', 'name': servicesMapping[curService], 'enabled': False, 'api_based': __import__('lib.subliminal.services.' + curService, globals=globals(), locals=locals(), fromlist=['Service'], level=-1).Service.api_based, 'url': __import__('lib.subliminal.services.' + curService, globals=globals(), locals=locals(), fromlist=['Service'], level=-1).Service.site_url}
|
||||
newList.append(curServiceDict)
|
||||
|
||||
return newList
|
||||
|
||||
|
||||
|
||||
def getEnabledServiceList():
|
||||
return [x['name'] for x in sortedServiceList() if x['enabled']]
|
||||
|
||||
|
||||
|
||||
def isValidLanguage(language):
|
||||
return subliminal.language.language_list(language)
|
||||
|
||||
|
||||
def getLanguageName(selectLang):
|
||||
return subliminal.language.Language(selectLang).name
|
||||
|
||||
|
||||
def wantedLanguages(sqlLike=False):
|
||||
def wantedLanguages(sqlLike = False):
|
||||
wantedLanguages = sorted(sickbeard.SUBTITLES_LANGUAGES)
|
||||
if sqlLike:
|
||||
return '%' + ','.join(wantedLanguages) + '%'
|
||||
return wantedLanguages
|
||||
|
||||
|
||||
def subtitlesLanguages(video_path):
|
||||
"""Return a list detected subtitles for the given video file"""
|
||||
video = subliminal.videos.Video.from_path(video_path)
|
||||
|
@ -95,27 +76,22 @@ def subtitlesLanguages(video_path):
|
|||
languages.add(SINGLE)
|
||||
return list(languages)
|
||||
|
||||
|
||||
# Return a list with languages that have alpha2 code
|
||||
def subtitleLanguageFilter():
|
||||
return [language for language in subliminal.language.LANGUAGES if language[2] != ""]
|
||||
|
||||
|
||||
class SubtitlesFinder():
|
||||
"""
|
||||
The SubtitlesFinder will be executed every hour, but it will not necessarily search for
and download subtitles; it does so only if the defined rule is true.
|
||||
"""
|
||||
|
||||
def run(self):
|
||||
# TODO: Put that in the __init__ before starting the thread?
|
||||
if not sickbeard.USE_SUBTITLES:
|
||||
logger.log(u'Subtitles support disabled', logger.DEBUG)
|
||||
return
|
||||
if len(sickbeard.subtitles.getEnabledServiceList()) < 1:
|
||||
logger.log(
|
||||
u'Not enough services selected. At least 1 service is required to search subtitles in the background',
|
||||
logger.ERROR)
|
||||
logger.log(u'Not enough services selected. At least 1 service is required to search subtitles in the background', logger.ERROR)
|
||||
return
|
||||
|
||||
logger.log(u'Checking for subtitles', logger.MESSAGE)
|
||||
|
@ -126,51 +102,40 @@ class SubtitlesFinder():
|
|||
# - episode subtitles != config wanted languages or SINGLE (depends on config multi)
|
||||
# - search count < 2 and diff(airdate, now) > 1 week : now -> 1d
|
||||
# - search count < 7 and diff(airdate, now) <= 1 week : now -> 4h -> 8h -> 16h -> 1d -> 1d -> 1d
|
||||
|
||||
|
||||
myDB = db.DBConnection()
|
||||
today = datetime.date.today().toordinal()
|
||||
# you have 5 minutes to understand that one. Good luck
|
||||
sqlResults = myDB.select(
|
||||
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.subtitles, e.subtitles_searchcount AS searchcount, e.subtitles_lastsearch AS lastsearch, e.location, (? - e.airdate) AS airdate_daydiff FROM tv_episodes AS e INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id) WHERE s.subtitles = 1 AND e.subtitles NOT LIKE (?) AND ((e.subtitles_searchcount <= 2 AND (? - e.airdate) > 7) OR (e.subtitles_searchcount <= 7 AND (? - e.airdate) <= 7)) AND (e.status IN (' + ','.join(
|
||||
[str(x) for x in Quality.DOWNLOADED]) + ') OR (e.status IN (' + ','.join(
|
||||
[str(x) for x in Quality.SNATCHED + Quality.SNATCHED_PROPER]) + ') AND e.location != ""))',
|
||||
[today, wantedLanguages(True), today, today])
|
||||
sqlResults = myDB.select('SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.subtitles, e.subtitles_searchcount AS searchcount, e.subtitles_lastsearch AS lastsearch, e.location, (? - e.airdate) AS airdate_daydiff FROM tv_episodes AS e INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id) WHERE s.subtitles = 1 AND e.subtitles NOT LIKE (?) AND ((e.subtitles_searchcount <= 2 AND (? - e.airdate) > 7) OR (e.subtitles_searchcount <= 7 AND (? - e.airdate) <= 7)) AND (e.status IN ('+','.join([str(x) for x in Quality.DOWNLOADED])+') OR (e.status IN ('+','.join([str(x) for x in Quality.SNATCHED + Quality.SNATCHED_PROPER])+') AND e.location != ""))', [today, wantedLanguages(True), today, today])
|
||||
if len(sqlResults) == 0:
|
||||
logger.log('No subtitles to download', logger.MESSAGE)
|
||||
return
|
||||
|
||||
|
||||
rules = self._getRules()
|
||||
now = datetime.datetime.now()
|
||||
for epToSub in sqlResults:
|
||||
if not ek.ek(os.path.isfile, epToSub['location']):
|
||||
logger.log('Episode file does not exist, cannot download subtitles for episode %dx%d of show %s' % (
|
||||
epToSub['season'], epToSub['episode'], epToSub['show_name']), logger.DEBUG)
|
||||
logger.log('Episode file does not exist, cannot download subtitles for episode %dx%d of show %s' % (epToSub['season'], epToSub['episode'], epToSub['show_name']), logger.DEBUG)
|
||||
continue
|
||||
|
||||
|
||||
# Old shows rule
|
||||
if ((epToSub['airdate_daydiff'] > 7 and epToSub['searchcount'] < 2 and now - datetime.datetime.strptime(
|
||||
epToSub['lastsearch'], '%Y-%m-%d %H:%M:%S') > datetime.timedelta(
|
||||
hours=rules['old'][epToSub['searchcount']])) or
|
||||
if ((epToSub['airdate_daydiff'] > 7 and epToSub['searchcount'] < 2 and now - datetime.datetime.strptime(epToSub['lastsearch'], '%Y-%m-%d %H:%M:%S') > datetime.timedelta(hours=rules['old'][epToSub['searchcount']])) or
|
||||
# Recent shows rule
|
||||
(epToSub['airdate_daydiff'] <= 7 and epToSub[
|
||||
'searchcount'] < 7 and now - datetime.datetime.strptime(epToSub['lastsearch'],
|
||||
'%Y-%m-%d %H:%M:%S') > datetime.timedelta(
|
||||
hours=rules['new'][epToSub['searchcount']]))):
|
||||
logger.log('Downloading subtitles for episode %dx%d of show %s' % (
|
||||
epToSub['season'], epToSub['episode'], epToSub['show_name']), logger.DEBUG)
|
||||
|
||||
(epToSub['airdate_daydiff'] <= 7 and epToSub['searchcount'] < 7 and now - datetime.datetime.strptime(epToSub['lastsearch'], '%Y-%m-%d %H:%M:%S') > datetime.timedelta(hours=rules['new'][epToSub['searchcount']]))):
|
||||
logger.log('Downloading subtitles for episode %dx%d of show %s' % (epToSub['season'], epToSub['episode'], epToSub['show_name']), logger.DEBUG)
|
||||
|
||||
showObj = helpers.findCertainShow(sickbeard.showList, int(epToSub['showid']))
|
||||
if not showObj:
|
||||
logger.log(u'Show not found', logger.DEBUG)
|
||||
return
|
||||
|
||||
|
||||
epObj = showObj.getEpisode(int(epToSub["season"]), int(epToSub["episode"]))
|
||||
if isinstance(epObj, str):
|
||||
logger.log(u'Episode not found', logger.DEBUG)
|
||||
return
|
||||
|
||||
|
||||
previous_subtitles = epObj.subtitles
|
||||
|
||||
|
||||
try:
|
||||
subtitles = epObj.downloadSubtitles()
|
||||
|
||||
|
|
|
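The retry schedule described in the comments above (old shows: now -> 1d; recent shows: now -> 4h -> 8h -> 16h -> 1d -> 1d -> 1d) is looked up through self._getRules(), which is not part of this diff. A plausible shape for that rules dictionary, shown only to make the searchcount-indexed timedelta checks above concrete; the values and dates are assumptions:

import datetime

# hypothetical shape of the dict returned by SubtitlesFinder._getRules()
rules = {'old': {0: 0, 1: 24},                                   # aired more than a week ago
         'new': {0: 0, 1: 4, 2: 8, 3: 16, 4: 24, 5: 24, 6: 24}}  # aired within the last week

# a new search only happens when enough hours have passed since the last one
searchcount = 2
last_search = datetime.datetime(2013, 6, 1, 12, 0)
now = datetime.datetime(2013, 6, 1, 22, 0)
print(now - last_search > datetime.timedelta(hours=rules['new'][searchcount]))  # True: 10h > 8h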
@ -80,6 +80,9 @@ class TVShow(object):
|
|||
self.lang = lang
|
||||
self.last_update_indexer = 1
|
||||
|
||||
self.rls_ignore_words = ""
|
||||
self.rls_require_words = ""
|
||||
|
||||
self.lock = threading.Lock()
|
||||
self._isDirGood = False
|
||||
|
||||
|
@ -710,10 +713,13 @@ class TVShow(object):
|
|||
|
||||
self.last_update_indexer = sqlResults[0]["last_update_indexer"]
|
||||
|
||||
self.rls_ignore_words = sqlResults[0]["rls_ignore_words"]
|
||||
self.rls_require_words = sqlResults[0]["rls_require_words"]
|
||||
|
||||
if not self.imdbid:
|
||||
self.imdbid = sqlResults[0]["imdb_id"]
|
||||
|
||||
#Get IMDb_info from database
|
||||
#Get IMDb_info from database
|
||||
sqlResults = myDB.select("SELECT * FROM imdb_info WHERE indexer_id = ?", [self.indexerid])
|
||||
|
||||
if len(sqlResults) == 0:
|
||||
|
@ -976,7 +982,9 @@ class TVShow(object):
|
|||
"startyear": self.startyear,
|
||||
"lang": self.lang,
|
||||
"imdb_id": self.imdbid,
|
||||
"last_update_indexer": self.last_update_indexer
|
||||
"last_update_indexer": self.last_update_indexer,
|
||||
"rls_ignore_words": self.rls_ignore_words,
|
||||
"rls_require_words": self.rls_require_words
|
||||
}
|
||||
myDB.upsert("tv_shows", newValueDict, controlValueDict)
|
||||
|
||||
|
|
|
@ -2850,7 +2850,7 @@ class Home:
|
|||
@cherrypy.expose
|
||||
def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], exceptions_list=[],
|
||||
flatten_folders=None, paused=None, directCall=False, air_by_date=None, dvdorder=None, indexerLang=None,
|
||||
subtitles=None, archive_firstmatch=None):
|
||||
subtitles=None, archive_firstmatch=None, rls_ignore_words=None, rls_require_words=None):
|
||||
|
||||
if show is None:
|
||||
errString = "Invalid show ID: " + str(show)
|
||||
|
@ -2935,6 +2935,9 @@ class Home:
|
|||
showObj.dvdorder = dvdorder
|
||||
showObj.archive_firstmatch = archive_firstmatch
|
||||
|
||||
showObj.rls_ignore_words = rls_ignore_words
|
||||
showObj.rls_require_words = rls_require_words
|
||||
|
||||
# if we change location clear the db of episodes, change it, write to db, and rescan
|
||||
if os.path.normpath(showObj._location) != os.path.normpath(location):
|
||||
logger.log(os.path.normpath(showObj._location) + " != " + os.path.normpath(location), logger.DEBUG)
|
||||
|
|