mirror of
https://github.com/SickGear/SickGear.git
synced 2025-01-05 17:43:37 +00:00
Merge branch 'dev' of /home/git/repositories/echel0n/sickrage
This commit is contained in:
commit
a74181dca2
11 changed files with 2112 additions and 2069 deletions
1063
SickBeard.py
1063
SickBeard.py
File diff suppressed because it is too large
Load diff
BIN
gui/slick/images/network/dr1.png
Normal file
BIN
gui/slick/images/network/dr1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 965 B |
BIN
gui/slick/images/network/dr2.png
Normal file
BIN
gui/slick/images/network/dr2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1 KiB |
BIN
gui/slick/images/network/tv2.png
Normal file
BIN
gui/slick/images/network/tv2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 760 B |
|
@ -1237,8 +1237,13 @@
|
|||
<div class="field-pair">
|
||||
<input type="checkbox" name="trakt_remove_watchlist" id="trakt_remove_watchlist" #if $sickbeard.TRAKT_REMOVE_WATCHLIST then "checked=\"checked\"" else ""# />
|
||||
<label class="clearfix" for="trakt_remove_watchlist">
|
||||
<span class="component-title">Remove from Watchlist:</span>
|
||||
<span class="component-title">Remove episode:</span>
|
||||
<span class="component-desc">Remove an episode from the watchlist after it is downloaded</span>
|
||||
</label>
|
||||
<input type="checkbox" name="trakt_remove_serieslist" id="trakt_remove_serieslist" #if $sickbeard.TRAKT_REMOVE_SERIESLIST then "checked=\"checked\"" else ""# />
|
||||
<label class="clearfix" for="trakt_remove_serieslist">
|
||||
<span class="component-title">Remove series:</span>
|
||||
<span class="component-desc">Remove the whole series from the watchlist after any download</span>
|
||||
</label>
|
||||
</div>
|
||||
<div class="field-pair">
|
||||
|
|
|
@ -1,143 +1,145 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2012 Nicolas Wack <wackou@gmail.com>
|
||||
#
|
||||
# This file is part of subliminal.
|
||||
#
|
||||
# subliminal is free software; you can redistribute it and/or modify it under
|
||||
# the terms of the GNU Lesser General Public License as published by
|
||||
# the Free Software Foundation; either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# subliminal is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||
from . import ServiceBase
|
||||
from ..cache import cachedmethod
|
||||
from ..language import language_set, Language
|
||||
from ..subtitles import get_subtitle_path, ResultSubtitle
|
||||
from ..utils import get_keywords
|
||||
from ..videos import Episode
|
||||
from bs4 import BeautifulSoup
|
||||
import logging
|
||||
import re
|
||||
|
||||
|
||||
# Module-level logger shared by all subliminal services.
logger = logging.getLogger("subliminal")


def match(pattern, string):
    """Search *string* for *pattern* and return its first capture group.

    Returns None (and logs at debug level) when the pattern does not
    match anywhere in the string.
    """
    found = re.search(pattern, string)
    if found is not None:
        return found.group(1)
    logger.debug(u'Could not match %r on %r' % (pattern, string))
    return None
|
||||
|
||||
|
||||
class TvSubtitles(ServiceBase):
    """Subtitle service that scrapes tvsubtitles.net.

    The site exposes no API, so every lookup downloads an HTML page and
    parses it with BeautifulSoup (using the 'permissive' parser feature).
    """
    server_url = 'http://www.tvsubtitles.net'
    site_url = 'http://www.tvsubtitles.net'
    api_based = False
    languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu',
                              'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk',
                              'zh', 'pb'])
    #TODO: Find more exceptions
    # Site-specific language codes that do not follow the usual conventions.
    language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'),
                    'cn': Language('chi'), 'br': Language('pob')}
    videos = [Episode]
    require_video = False
    required_features = ['permissive']

    @cachedmethod
    def get_likely_series_id(self, name):
        """Return the tvsubtitles.net show id most likely matching *name*.

        Returns None when the site search yields no results at all.
        """
        r = self.session.post('%s/search.php' % self.server_url, data={'q': name})
        soup = BeautifulSoup(r.content, self.required_features)
        maindiv = soup.find('div', 'left')
        results = []
        for elem in maindiv.find_all('li'):
            # raw strings so backslash escapes go to the regex engine,
            # not the (deprecated) string-literal escape machinery
            sid = int(match(r'tvshow-([0-9]+)\.html', elem.a['href']))
            show_name = match(r'(.*) \(', elem.a.text)
            results.append((show_name, sid))
        if not results:
            # was: results[0] -> unhandled IndexError on an empty search page
            return None
        #TODO: pick up the best one in a smart way
        result = results[0]
        return result[1]

    @cachedmethod
    def get_episode_id(self, series_id, season, number):
        """Get the TvSubtitles id for the given episode. Raises KeyError if none
        could be found."""
        # download the page of the season, contains ids for all episodes
        episode_id = None
        r = self.session.get('%s/tvshow-%d-%d.html' % (self.server_url, series_id, season))
        soup = BeautifulSoup(r.content, self.required_features)
        table = soup.find('table', id='table5')
        for row in table.find_all('tr'):
            cells = row.find_all('td')
            if not cells:
                continue
            episode_number = match(r'x([0-9]+)', cells[0].text)
            if not episode_number:
                continue
            episode_number = int(episode_number)
            episode_id = int(match(r'episode-([0-9]+)', cells[1].a['href']))
            # we could just return the id of the queried episode, but as we
            # already downloaded the whole page we might as well fill in the
            # information for all the episodes of the season
            self.cache_for(self.get_episode_id, args=(series_id, season, episode_number), result=episode_id)
        # raises KeyError if not found
        return self.cached_value(self.get_episode_id, args=(series_id, season, number))

    # Do not cache this method in order to always check for the most recent
    # subtitles
    def get_sub_ids(self, episode_id):
        """Return a list of dicts describing every subtitle on an episode page.

        Each dict has 'subid' and 'language' keys, plus 'rip' and 'release'
        when the page provides those paragraphs.
        """
        subids = []
        r = self.session.get('%s/episode-%d.html' % (self.server_url, episode_id))
        epsoup = BeautifulSoup(r.content, self.required_features)
        for subdiv in epsoup.find_all('a'):
            if 'href' not in subdiv.attrs or not subdiv['href'].startswith('/subtitle'):
                continue
            subid = int(match(r'([0-9]+)', subdiv['href']))
            lang = self.get_language(match(r'flags/(.*)\.gif', subdiv.img['src']))
            result = {'subid': subid, 'language': lang}
            for p in subdiv.find_all('p'):
                if 'alt' in p.attrs and p['alt'] == 'rip':
                    result['rip'] = p.text.strip()
                if 'alt' in p.attrs and p['alt'] == 'release':
                    result['release'] = p.text.strip()
            subids.append(result)
        return subids

    def list_checked(self, video, languages):
        """Query subtitles for an already-validated *video*."""
        return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)

    def query(self, filepath, languages, keywords, series, season, episode):
        """Search tvsubtitles.net and return matching ResultSubtitle objects."""
        logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
        self.init_cache()
        sid = self.get_likely_series_id(series.lower())
        if sid is None:
            # no series id: the season page URL cannot be built, so a '%d'
            # format below would raise TypeError instead of failing cleanly
            logger.debug(u'Could not find series id for %s' % series)
            return []
        try:
            ep_id = self.get_episode_id(sid, season, episode)
        except KeyError:
            logger.debug(u'Could not find episode id for %s season %d episode %d' % (series, season, episode))
            return []
        subids = self.get_sub_ids(ep_id)
        # filter the subtitles with our queried languages
        subtitles = []
        for subid in subids:
            language = subid['language']
            if language not in languages:
                continue
            path = get_subtitle_path(filepath, language, self.config.multi)
            # NOTE(review): assumes every entry carried 'rip' and 'release'
            # paragraphs -- a KeyError is raised otherwise; verify against
            # live pages before relying on it
            subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s/download-%d.html' % (self.server_url, subid['subid']),
                                      keywords=[subid['rip'], subid['release']])
            subtitles.append(subtitle)
        return subtitles

    def download(self, subtitle):
        """Download *subtitle* (served as a zip on this site) to its path."""
        self.download_zip_file(subtitle.link, subtitle.path)
        return subtitle


Service = TvSubtitles
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2012 Nicolas Wack <wackou@gmail.com>
|
||||
#
|
||||
# This file is part of subliminal.
|
||||
#
|
||||
# subliminal is free software; you can redistribute it and/or modify it under
|
||||
# the terms of the GNU Lesser General Public License as published by
|
||||
# the Free Software Foundation; either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# subliminal is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||
from . import ServiceBase
|
||||
from ..cache import cachedmethod
|
||||
from ..language import language_set, Language
|
||||
from ..subtitles import get_subtitle_path, ResultSubtitle
|
||||
from ..utils import get_keywords
|
||||
from ..videos import Episode
|
||||
from bs4 import BeautifulSoup
|
||||
import logging
|
||||
import re
|
||||
|
||||
|
||||
logger = logging.getLogger("subliminal")


def match(pattern, string):
    """Return group 1 of the first *pattern* match in *string*, or None.

    A debug message is logged when nothing matches.
    """
    m = re.search(pattern, string)
    if m is None:
        logger.debug(u'Could not match %r on %r' % (pattern, string))
        return None
    return m.group(1)
|
||||
|
||||
|
||||
class TvSubtitles(ServiceBase):
    """Subtitle service that scrapes tvsubtitles.net (the site has no API)."""
    server_url = 'http://www.tvsubtitles.net'
    site_url = 'http://www.tvsubtitles.net'
    api_based = False
    languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu',
                              'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk',
                              'zh', 'pb'])
    #TODO: Find more exceptions
    # Site-specific language codes mapped to their proper Language objects.
    language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'),
                    'cn': Language('chi'), 'br': Language('pob')}
    videos = [Episode]
    require_video = False
    required_features = ['permissive']

    @cachedmethod
    def get_likely_series_id(self, name):
        """Return the tvsubtitles.net show id most likely matching *name*.

        Implicitly returns None when the search page lists no shows.
        """
        r = self.session.post('%s/search.php' % self.server_url, data={'q': name})
        soup = BeautifulSoup(r.content, self.required_features)
        maindiv = soup.find('div', 'left')
        results = []
        for elem in maindiv.find_all('li'):
            sid = int(match('tvshow-([0-9]+)\.html', elem.a['href']))
            show_name = match('(.*) \(', elem.a.text)
            results.append((show_name, sid))

        if len(results):
            #TODO: pick up the best one in a smart way
            result = results[0]
            return result[1]

    @cachedmethod
    def get_episode_id(self, series_id, season, number):
        """Get the TvSubtitles id for the given episode. Raises KeyError if none
        could be found."""
        # download the page of the season, contains ids for all episodes
        episode_id = None
        r = self.session.get('%s/tvshow-%d-%d.html' % (self.server_url, series_id, season))
        soup = BeautifulSoup(r.content, self.required_features)
        table = soup.find('table', id='table5')
        for row in table.find_all('tr'):
            cells = row.find_all('td')
            if not cells:
                continue
            episode_number = match('x([0-9]+)', cells[0].text)
            if not episode_number:
                continue
            episode_number = int(episode_number)
            episode_id = int(match('episode-([0-9]+)', cells[1].a['href']))
            # we could just return the id of the queried episode, but as we
            # already downloaded the whole page we might as well fill in the
            # information for all the episodes of the season
            self.cache_for(self.get_episode_id, args=(series_id, season, episode_number), result=episode_id)
        # raises KeyError if not found
        return self.cached_value(self.get_episode_id, args=(series_id, season, number))

    # Do not cache this method in order to always check for the most recent
    # subtitles
    def get_sub_ids(self, episode_id):
        """Return a list of dicts describing every subtitle on an episode page.

        Each dict has 'subid' and 'language' keys, plus 'rip' and 'release'
        when the corresponding paragraphs exist on the page.
        """
        subids = []
        r = self.session.get('%s/episode-%d.html' % (self.server_url, episode_id))
        epsoup = BeautifulSoup(r.content, self.required_features)
        for subdiv in epsoup.find_all('a'):
            # only subtitle links are of interest on the episode page
            if 'href' not in subdiv.attrs or not subdiv['href'].startswith('/subtitle'):
                continue
            subid = int(match('([0-9]+)', subdiv['href']))
            lang = self.get_language(match('flags/(.*).gif', subdiv.img['src']))
            result = {'subid': subid, 'language': lang}
            for p in subdiv.find_all('p'):
                if 'alt' in p.attrs and p['alt'] == 'rip':
                    result['rip'] = p.text.strip()
                if 'alt' in p.attrs and p['alt'] == 'release':
                    result['release'] = p.text.strip()
            subids.append(result)
        return subids

    def list_checked(self, video, languages):
        """Query subtitles for an already-validated *video*."""
        return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)

    def query(self, filepath, languages, keywords, series, season, episode):
        """Search tvsubtitles.net and return matching ResultSubtitle objects."""
        logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
        self.init_cache()
        # NOTE(review): get_likely_series_id may return None; the '%d'
        # formatting inside get_episode_id would then raise TypeError, which
        # the KeyError handler below does not catch -- confirm and guard.
        sid = self.get_likely_series_id(series.lower())
        try:
            ep_id = self.get_episode_id(sid, season, episode)
        except KeyError:
            logger.debug(u'Could not find episode id for %s season %d episode %d' % (series, season, episode))
            return []
        subids = self.get_sub_ids(ep_id)
        # filter the subtitles with our queried languages
        subtitles = []
        for subid in subids:
            language = subid['language']
            if language not in languages:
                continue
            path = get_subtitle_path(filepath, language, self.config.multi)
            # NOTE(review): subid['rip'] / subid['release'] raise KeyError when
            # the page omitted those paragraphs -- verify against live pages.
            subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s/download-%d.html' % (self.server_url, subid['subid']),
                                      keywords=[subid['rip'], subid['release']])
            subtitles.append(subtitle)
        return subtitles

    def download(self, subtitle):
        """Download *subtitle* (served as a zip on this site) to its path."""
        self.download_zip_file(subtitle.link, subtitle.path)
        return subtitle


# Entry point looked up by the service loader.
Service = TvSubtitles
|
|
@ -1,7 +1,8 @@
|
|||
import urllib2
|
||||
from urllib2 import Request, urlopen, HTTPError, URLError
|
||||
|
||||
import base64
|
||||
from sha import new as sha1
|
||||
|
||||
from hashlib import sha1
|
||||
|
||||
try:
|
||||
import json
|
||||
except ImportError:
|
||||
|
@ -25,25 +26,35 @@ def TraktCall(method, api, username=None, password=None, data={}):
|
|||
if not api:
|
||||
return None
|
||||
|
||||
# if the username isn't given then it failed
|
||||
if username and password:
|
||||
password = sha1(password).hexdigest()
|
||||
data["username"] = username
|
||||
data["password"] = password
|
||||
|
||||
# replace the API string with what we found
|
||||
method = method.replace("%API%", api)
|
||||
|
||||
# make the full url
|
||||
url = 'https://api.trakt.tv/' + method
|
||||
|
||||
# take the URL params and make a json object out of them
|
||||
encoded_data = json.dumps(data)
|
||||
encoded_data = json.JSONEncoder().encode(data)
|
||||
|
||||
request = Request(url, encoded_data)
|
||||
|
||||
# if the username isn't given then it failed
|
||||
if username and password:
|
||||
pwdsha1 = sha1(password).hexdigest()
|
||||
base64string = base64.encodestring('%s:%s' % (username, pwdsha1)).replace('\n', '')
|
||||
request.add_header("Accept", "*/*")
|
||||
request.add_header("User-Agent", "CPython/2.7.5 Unknown/Unknown")
|
||||
request.add_header("Authorization", "Basic %s" % base64string)
|
||||
|
||||
# request the URL from trakt and parse the result as json
|
||||
try:
|
||||
#logger.log("trakt: Calling method http://api.trakt.tv/" + method + ", with data" + encoded_data, logger.DEBUG)
|
||||
stream = urllib2.urlopen("http://api.trakt.tv/" + method, encoded_data)
|
||||
resp = stream.read()
|
||||
stream = urlopen(request).read()
|
||||
|
||||
resp = json.loads(resp)
|
||||
# check if results are valid
|
||||
if stream == '[]':
|
||||
resp = 'NULL'
|
||||
else:
|
||||
resp = json.JSONDecoder().decode(stream)
|
||||
|
||||
if ("error" in resp):
|
||||
raise Exception(resp["error"])
|
||||
|
|
|
@ -351,6 +351,7 @@ TRAKT_USERNAME = None
|
|||
TRAKT_PASSWORD = None
|
||||
TRAKT_API = ''
|
||||
TRAKT_REMOVE_WATCHLIST = False
|
||||
TRAKT_REMOVE_SERIESLIST = False
|
||||
TRAKT_USE_WATCHLIST = False
|
||||
TRAKT_METHOD_ADD = 0
|
||||
TRAKT_START_PAUSED = False
|
||||
|
|
2765
sickbeard/helpers.py
2765
sickbeard/helpers.py
File diff suppressed because it is too large
Load diff
|
@ -53,6 +53,10 @@ class TraktChecker():
|
|||
def findShow(self, indexer, indexerid):
|
||||
library = TraktCall("user/library/shows/all.json/%API%/" + sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_API, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
|
||||
|
||||
if library == 'NULL':
|
||||
logger.log(u"No shows found in your library, aborting library update", logger.DEBUG)
|
||||
return
|
||||
|
||||
if not library:
|
||||
logger.log(u"Could not connect to trakt service, aborting library check", logger.ERROR)
|
||||
return
|
||||
|
@ -102,6 +106,10 @@ class TraktChecker():
|
|||
logger.log(u"Starting trakt show watchlist check", logger.DEBUG)
|
||||
watchlist = TraktCall("user/watchlist/shows.json/%API%/" + sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_API, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
|
||||
|
||||
if watchlist == 'NULL':
|
||||
logger.log(u"No shows found in your watchlist, aborting watchlist update", logger.DEBUG)
|
||||
return
|
||||
|
||||
if not watchlist:
|
||||
logger.log(u"Could not connect to trakt service, aborting watchlist update", logger.ERROR)
|
||||
return
|
||||
|
@ -134,6 +142,10 @@ class TraktChecker():
|
|||
logger.log(u"Starting trakt episode watchlist check", logger.DEBUG)
|
||||
watchlist = TraktCall("user/watchlist/episodes.json/%API%/" + sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_API, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
|
||||
|
||||
if watchlist == 'NULL':
|
||||
logger.log(u"No episodes found in your watchlist, aborting watchlist update", logger.DEBUG)
|
||||
return
|
||||
|
||||
if not watchlist:
|
||||
logger.log(u"Could not connect to trakt service, aborting watchlist update", logger.ERROR)
|
||||
return
|
||||
|
|
|
@ -2252,7 +2252,7 @@ class ConfigNotifications(MainHandler):
|
|||
use_trakt=None, trakt_username=None, trakt_password=None, trakt_api=None,
|
||||
trakt_remove_watchlist=None, trakt_use_watchlist=None, trakt_method_add=None,
|
||||
trakt_start_paused=None, trakt_use_recommended=None, trakt_sync=None,
|
||||
trakt_default_indexer=None,
|
||||
trakt_default_indexer=None, trakt_remove_serieslist=None,
|
||||
use_synologynotifier=None, synologynotifier_notify_onsnatch=None,
|
||||
synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
|
||||
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None,
|
||||
|
@ -2360,6 +2360,7 @@ class ConfigNotifications(MainHandler):
|
|||
sickbeard.TRAKT_PASSWORD = trakt_password
|
||||
sickbeard.TRAKT_API = trakt_api
|
||||
sickbeard.TRAKT_REMOVE_WATCHLIST = config.checkbox_to_value(trakt_remove_watchlist)
|
||||
sickbeard.TRAKT_REMOVE_SERIESLIST = config.checkbox_to_value(trakt_remove_serieslist)
|
||||
sickbeard.TRAKT_USE_WATCHLIST = config.checkbox_to_value(trakt_use_watchlist)
|
||||
sickbeard.TRAKT_METHOD_ADD = trakt_method_add
|
||||
sickbeard.TRAKT_START_PAUSED = config.checkbox_to_value(trakt_start_paused)
|
||||
|
@ -2795,6 +2796,11 @@ class NewHomeAddShows(MainHandler):
|
|||
|
||||
logger.log(u"Getting recommended shows from Trakt.tv", logger.DEBUG)
|
||||
recommendedlist = TraktCall("recommendations/shows.json/%API%", sickbeard.TRAKT_API, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
|
||||
|
||||
if recommendedlist == 'NULL':
|
||||
logger.log(u"No shows found in your recommendedlist, aborting recommendedlist update", logger.DEBUG)
|
||||
return
|
||||
|
||||
if recommendedlist is None:
|
||||
logger.log(u"Could not connect to trakt service, aborting recommended list update", logger.ERROR)
|
||||
return
|
||||
|
|
Loading…
Reference in a new issue