Merge pull request #716 from echel0n/dev

Dev
commit 252a81747a
Author: echel0n
Date: 2014-08-13 15:00:20 -07:00
34 changed files with 778 additions and 1677 deletions

File diff suppressed because it is too large.


@@ -1224,9 +1224,9 @@
<label class="nocheck clearfix">
<span class="component-title">Watchlist Add Method:</span>
<select id="trakt_method_add" name="trakt_method_add">
<option value="0" #if $sickbeard.TRAKT_METHOD_ADD == "0" then 'selected="selected"' else ""#>Skip All</option>
<option value="1" #if $sickbeard.TRAKT_METHOD_ADD == "1" then 'selected="selected"' else ""#>Download Pilot Only</option>
<option value="2" #if $sickbeard.TRAKT_METHOD_ADD == "2" then 'selected="selected"' else ""#>Get whole show</option>
<option value="0" #if $sickbeard.TRAKT_METHOD_ADD == 0 then "selected=\"selected\"" else ""#>Skip All</option>
<option value="1" #if $sickbeard.TRAKT_METHOD_ADD == 1 then "selected=\"selected\"" else ""#>Download Pilot Only</option>
<option value="2" #if $sickbeard.TRAKT_METHOD_ADD == 2 then "selected=\"selected\"" else ""#>Get whole show</option>
</select>
</label>
<label class="nocheck clearfix">


@@ -313,6 +313,76 @@
<span class="component-desc">Category for downloads to go into (eg. TV)</span>
</label>
</div>
<%
if config['nzbget_priority'] == -100:
prio_verylow = 'selected="selected"'
prio_low = ''
prio_normal = ''
prio_high = ''
prio_veryhigh = ''
prio_force = ''
elif config['nzbget_priority'] == -50:
prio_verylow = ''
prio_low = 'selected="selected"'
prio_normal = ''
prio_high = ''
prio_veryhigh = ''
prio_force = ''
elif config['nzbget_priority'] == 0:
prio_verylow = ''
prio_low = ''
prio_normal = 'selected="selected"'
prio_high = ''
prio_veryhigh = ''
prio_force = ''
elif config['nzbget_priority'] == 50:
prio_verylow = ''
prio_low = ''
prio_normal = ''
prio_high = 'selected="selected"'
prio_veryhigh = ''
prio_force = ''
elif config['nzbget_priority'] == 100:
prio_verylow = ''
prio_low = ''
prio_normal = ''
prio_high = ''
prio_veryhigh = 'selected="selected"'
prio_force = ''
elif config['nzbget_priority'] == 900:
prio_verylow = ''
prio_low = ''
prio_normal = ''
prio_high = ''
prio_veryhigh = ''
prio_force = 'selected="selected"'
else:
prio_verylow = ''
prio_low = ''
prio_normal = 'selected="selected"'
prio_high = ''
prio_veryhigh = ''
prio_force = ''
%>
<div class="field-pair">
<label class="nocheck clearfix">
<span class="component-title">NZBget Priority</span>
<select name="nzbget_priority" id="nzbget_priority">
<option value="-100" ${prio_verylow}>Very Low</option>
<option value="-50" ${prio_low}>Low</option>
<option value="0" ${prio_normal}>Normal</option>
<option value="50" ${prio_high}>High</option>
<option value="100" ${prio_veryhigh}>Very High</option>
<option value="900" ${prio_force}>Force</option>
</select>
</label>
<label class="nocheck clearfix">
<span class="component-title">&nbsp;</span>
<span class="component-desc">Priority for daily snatches (no backlog)</span>
</label>
</div>
</div>
<div class="clearfix"></div>


@@ -240,6 +240,7 @@ NZBGET_PASSWORD = None
NZBGET_CATEGORY = None
NZBGET_HOST = None
NZBGET_USE_HTTPS = False
NZBGET_PRIORITY = 100
TORRENT_USERNAME = None
TORRENT_PASSWORD = None
@@ -446,11 +447,11 @@ def initialize(consoleLogging=True):
global BRANCH, ACTUAL_LOG_DIR, LOG_DIR, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, USE_API, API_KEY, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
HANDLE_REVERSE_PROXY, USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, CHECK_PROPERS_INTERVAL, ALLOW_HIGH_PRIORITY, TORRENT_METHOD, \
SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_HOST, \
NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_HOST, NZBGET_USE_HTTPS, backlogSearchScheduler, \
NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBGET_HOST, NZBGET_USE_HTTPS, backlogSearchScheduler, \
TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_SEED_TIME, TORRENT_PAUSED, TORRENT_HIGH_BANDWIDTH, TORRENT_LABEL, TORRENT_VERIFY_CERT, \
USE_XBMC, XBMC_ALWAYS_ON, XBMC_NOTIFY_ONSNATCH, XBMC_NOTIFY_ONDOWNLOAD, XBMC_NOTIFY_ONSUBTITLEDOWNLOAD, XBMC_UPDATE_FULL, XBMC_UPDATE_ONLYFIRST, \
XBMC_UPDATE_LIBRARY, XBMC_HOST, XBMC_USERNAME, XBMC_PASSWORD, BACKLOG_FREQUENCY, \
USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_API, TRAKT_REMOVE_WATCHLIST, TRAKT_USE_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_DEFAULT_INDEXER, \
USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_API, TRAKT_REMOVE_WATCHLIST, TRAKT_USE_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, \
USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, PLEX_NOTIFY_ONSUBTITLEDOWNLOAD, PLEX_UPDATE_LIBRARY, \
PLEX_SERVER_HOST, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, DEFAULT_BACKLOG_FREQUENCY, MIN_BACKLOG_FREQUENCY, BACKLOG_STARTUP, SKIP_REMOVED_FILES, \
showUpdateScheduler, __INITIALIZED__, LAUNCH_BROWSER, UPDATE_SHOWS_ON_START, SORT_ARTICLE, showList, loadingShowList, \
@@ -639,7 +640,7 @@ def initialize(consoleLogging=True):
USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', 500)
AUTOPOSTPROCESSER_FREQUENCY = check_setting_int(CFG, 'General', 'dailysearch_frequency',
AUTOPOSTPROCESSER_FREQUENCY = check_setting_int(CFG, 'General', 'autopostprocesser_frequency',
DEFAULT_AUTOPOSTPROCESSER_FREQUENCY)
if AUTOPOSTPROCESSER_FREQUENCY < MIN_AUTOPOSTPROCESSER_FREQUENCY:
AUTOPOSTPROCESSER_FREQUENCY = MIN_AUTOPOSTPROCESSER_FREQUENCY
@@ -691,6 +692,7 @@ def initialize(consoleLogging=True):
NZBGET_CATEGORY = check_setting_str(CFG, 'NZBget', 'nzbget_category', 'tv')
NZBGET_HOST = check_setting_str(CFG, 'NZBget', 'nzbget_host', '')
NZBGET_USE_HTTPS = bool(check_setting_int(CFG, 'NZBget', 'nzbget_use_https', 0))
NZBGET_PRIORITY = check_setting_int(CFG, 'NZBget', 'nzbget_priority', 100)
TORRENT_USERNAME = check_setting_str(CFG, 'TORRENT', 'torrent_username', '')
TORRENT_PASSWORD = check_setting_str(CFG, 'TORRENT', 'torrent_password', '')
@@ -798,6 +800,7 @@ def initialize(consoleLogging=True):
TRAKT_PASSWORD = check_setting_str(CFG, 'Trakt', 'trakt_password', '')
TRAKT_API = check_setting_str(CFG, 'Trakt', 'trakt_api', '')
TRAKT_REMOVE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_watchlist', 0))
TRAKT_REMOVE_SERIESLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_serieslist', 0))
TRAKT_USE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_watchlist', 0))
TRAKT_METHOD_ADD = check_setting_int(CFG, 'Trakt', 'trakt_method_add', 0)
TRAKT_START_PAUSED = bool(check_setting_int(CFG, 'Trakt', 'trakt_start_paused', 0))
@@ -1498,6 +1501,7 @@ def save_config():
new_config['NZBget']['nzbget_category'] = NZBGET_CATEGORY
new_config['NZBget']['nzbget_host'] = NZBGET_HOST
new_config['NZBget']['nzbget_use_https'] = int(NZBGET_USE_HTTPS)
new_config['NZBget']['nzbget_priority'] = NZBGET_PRIORITY
new_config['TORRENT'] = {}
new_config['TORRENT']['torrent_username'] = TORRENT_USERNAME
@@ -1615,6 +1619,7 @@ def save_config():
new_config['Trakt']['trakt_password'] = helpers.encrypt(TRAKT_PASSWORD, ENCRYPTION_VERSION)
new_config['Trakt']['trakt_api'] = TRAKT_API
new_config['Trakt']['trakt_remove_watchlist'] = int(TRAKT_REMOVE_WATCHLIST)
new_config['Trakt']['trakt_remove_serieslist'] = int(TRAKT_REMOVE_SERIESLIST)
new_config['Trakt']['trakt_use_watchlist'] = int(TRAKT_USE_WATCHLIST)
new_config['Trakt']['trakt_method_add'] = int(TRAKT_METHOD_ADD)
new_config['Trakt']['trakt_start_paused'] = int(TRAKT_START_PAUSED)
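
The new trakt_remove_serieslist setting follows the same round trip as its neighbours: written to config.ini as an int, read back through check_setting_int, exposed as a bool. A sketch of that pattern, with check_setting_int stubbed (the stub is an assumption, not SickRage's real helper):

def check_setting_int(cfg, section, key, default):
    # stand-in: return the stored int, or the default on any failure
    try:
        return int(cfg[section][key])
    except (KeyError, TypeError, ValueError):
        return default

cfg = {'Trakt': {'trakt_remove_serieslist': '1'}}
TRAKT_REMOVE_SERIESLIST = bool(check_setting_int(cfg, 'Trakt', 'trakt_remove_serieslist', 0))
assert TRAKT_REMOVE_SERIESLIST is True   # saved back later as int(True) == 1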


@@ -136,10 +136,9 @@ def replaceExtension(filename, newExt):
return sepFile[0] + "." + newExt
def isBtsyncFile(filename):
sepFile = filename.rpartition(".")
if sepFile[2].lower() == '!sync':
def isSyncFile(filename):
extension = filename.rpartition(".")[2].lower()
if extension == '!sync' or extension == 'lftp-pget-status':
return True
else:
return False
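
The rename from isBtsyncFile to isSyncFile also widens the check to lftp's transfer-status files. The helper on its own, with the if/else collapsed into a single return (same logic as the diff):

def isSyncFile(filename):
    # temporary files left by sync tools: BTSync (".!sync") and
    # lftp/pget (".lftp-pget-status")
    extension = filename.rpartition(".")[2].lower()
    return extension in ('!sync', 'lftp-pget-status')

assert isSyncFile('Show.S01E01.mkv.!sync')
assert isSyncFile('Show.S01E01.mkv.lftp-pget-status')
assert not isSyncFile('Show.S01E01.mkv')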


@@ -242,7 +242,10 @@ class NameParser(object):
# if we have an air-by-date show then get the real season/episode numbers
if bestResult.is_air_by_date or bestResult.is_sports:
airdate = bestResult.air_date.toordinal() if bestResult.air_date else bestResult.sports_air_date.toordinal()
try:
airdate = bestResult.air_date.toordinal()
except:
airdate = bestResult.sports_air_date.toordinal()
myDB = db.DBConnection()
sql_result = myDB.select(
@@ -252,18 +255,41 @@
if sql_result:
season_number = int(sql_result[0][0])
episode_numbers = [int(sql_result[0][1])]
if not season_number or not len(episode_numbers):
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(bestResult.show.indexer).api_params.copy()
for epNo in episode_numbers:
s = season_number
e = epNo
if bestResult.show.lang:
lINDEXER_API_PARMS['language'] = bestResult.show.lang
if self.convert:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
t = sickbeard.indexerApi(bestResult.show.indexer).indexer(**lINDEXER_API_PARMS)
if bestResult.is_air_by_date:
epObj = t[bestResult.show.indexerid].airedOn(parse_result.air_date)[0]
else:
epObj = t[bestResult.show.indexerid].airedOn(parse_result.sports_air_date)[0]
season_number = int(epObj["seasonnumber"])
episode_numbers = [int(epObj["episodenumber"])]
except sickbeard.indexer_episodenotfound:
logger.log(u"Unable to find episode with date " + str(parse_result.air_date) + " for show " + bestResult.show.name + ", skipping", logger.WARNING)
episode_numbers = []
except sickbeard.indexer_error, e:
logger.log(u"Unable to contact " + sickbeard.indexerApi(bestResult.show.indexer).name + ": " + ex(e), logger.WARNING)
episode_numbers = []
for epNo in episode_numbers:
s = season_number
e = epNo
if self.convert:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
elif bestResult.show.is_anime and len(bestResult.ab_episode_numbers):
scene_season = scene_exceptions.get_scene_exception_by_name(bestResult.series_name)[1]
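
The parser change in this hunk: the airdate-to-episode lookup still tries the local tv_episodes table first, but on a miss it now asks the indexer via airedOn() before scene-number conversion runs. The shape of that fallback, with both lookups as hypothetical stand-ins:

def resolve_by_airdate(airdate, db_lookup, indexer_lookup):
    # db_lookup: returns (season, episode) or None, like the SQL select above
    # indexer_lookup: like t[indexerid].airedOn(date)[0]; may raise on a miss
    row = db_lookup(airdate)
    if row:
        return int(row[0]), [int(row[1])]
    epObj = indexer_lookup(airdate)
    return int(epObj['seasonnumber']), [int(epObj['episodenumber'])]

season, episodes = resolve_by_airdate(
    '2014-08-13',
    lambda d: None,                                       # DB miss
    lambda d: {'seasonnumber': 2, 'episodenumber': 5})    # indexer hit
assert (season, episodes) == (2, [5])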


@@ -82,7 +82,7 @@ def sendNZB(nzb, proper=False):
dupekey += "-" + str(curEp.season) + "." + str(curEp.episode)
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
addToTop = True
nzbgetprio = 100
nzbgetprio = sickbeard.NZBGET_PRIORITY
if nzb.quality != Quality.UNKNOWN:
dupescore = nzb.quality * 100
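
A one-line change that un-hardcodes the snatch priority: sendNZB now forwards whatever NZBGET_PRIORITY was loaded from the config (default 100, per the __init__.py hunk above) instead of always sending 100. In effect:

class FakeSettings(object):        # stand-in for the sickbeard module
    NZBGET_PRIORITY = 0            # user picked Normal in the new dropdown

sickbeard = FakeSettings()

nzbgetprio_before = 100                        # old: pinned to Very High
nzbgetprio_after = sickbeard.NZBGET_PRIORITY   # new: follows the setting
assert nzbgetprio_after == 0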


@@ -138,11 +138,11 @@ def processDir(dirName, nzbName=None, process_method=None, force=False, is_prior
path, dirs, files = get_path_dir_files(dirName, nzbName, type)
btsyncFiles = filter(helpers.isBtsyncFile, files)
SyncFiles = filter(helpers.isSyncFile, files)
# Don't post process if files are still being synced from btsync
if btsyncFiles:
returnStr += logHelper(u"Found .!sync files, skipping post processing", logger.ERROR)
# Don't post process if files are still being synced
if SyncFiles:
returnStr += logHelper(u"Found temporary sync files, skipping post processing", logger.ERROR)
return returnStr
returnStr += logHelper(u"PostProcessing Path: " + path, logger.DEBUG)
@@ -186,11 +186,11 @@ def processDir(dirName, nzbName=None, process_method=None, force=False, is_prior
for processPath, processDir, fileList in ek.ek(os.walk, ek.ek(os.path.join, path, dir), topdown=False):
btsyncFiles = filter(helpers.isBtsyncFile, fileList)
SyncFiles = filter(helpers.isSyncFile, fileList)
# Don't post process if files are still being synced from btsync
if btsyncFiles:
returnStr += logHelper(u"Found .!sync files, skipping post processing", logger.ERROR)
# Don't post process if files are still being synced
if SyncFiles:
returnStr += logHelper(u"Found temporary sync files, skipping post processing", logger.ERROR)
return returnStr
rarFiles = filter(helpers.isRarFile, fileList)
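
Both processTV.py hunks apply the same guard: if any sync file is present, skip the whole directory and let the next run retry. Reduced to a predicate (with helpers.isSyncFile approximated by endswith for self-containment):

def ready_for_postprocessing(files):
    # skip post-processing while any file is still mid-transfer
    sync_files = [f for f in files
                  if f.lower().endswith(('.!sync', '.lftp-pget-status'))]
    return not sync_files

assert ready_for_postprocessing(['a.mkv', 'a.nfo'])
assert not ready_for_postprocessing(['a.mkv', 'a.mkv.!sync'])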


@@ -138,17 +138,17 @@ class ProperFinder():
# populate our Proper instance
curProper.season = parse_result.season_number if parse_result.season_number != None else 1
curProper.episode = parse_result.episode_numbers[0]
curProper.release_group = parse_result.release_group
curProper.version = parse_result.version
curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
# only get anime proper if it has release group and version
if parse_result.is_anime:
if parse_result.release_group and parse_result.version:
curProper.release_group = parse_result.release_group
curProper.version = parse_result.version
else:
if not curProper.release_group and curProper.version == -1:
logger.log(u"Proper " + curProper.name + " doesn't have a release group and version, ignoring it",
logger.DEBUG)
continue
curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
if not show_name_helpers.filterBadReleases(curProper.name):
logger.log(u"Proper " + curProper.name + " isn't a valid scene release that we want, ignoring it",
logger.DEBUG)


@@ -52,9 +52,6 @@ class Animezb(generic.NZBProvider):
def imageName(self):
return 'animezb.png'
def _checkAuth(self):
return True
def _get_season_search_strings(self, ep_obj):
return [x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
@@ -147,7 +144,12 @@ class AnimezbCache(tvcache.TVCache):
logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG)
return self.getRSSFeed(rss_url).entries
data = self.getRSSFeed(rss_url)
if data and 'entries' in data:
return data.entries
else:
return []
provider = Animezb()
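
This guarded return is the recurring pattern of the commit: getRSSFeed() can come back empty or unparsed, so the cache only dereferences .entries after checking for it. The same shape is applied to the EZRSS, Fanzub, Nyaa, omgwtfnzbs and generic RSS-torrent caches below. Isolated:

def feed_entries(data):
    # data is a feedparser-style dict; a failed fetch yields None (or a
    # dict without 'entries'), which crashed the old
    # "return self.getRSSFeed(url).entries" one-liner
    if data and 'entries' in data:
        return data['entries']
    return []

assert feed_entries(None) == []
assert feed_entries({'entries': ['item1']}) == ['item1']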


@@ -31,7 +31,7 @@ from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex
from sickbeard.exceptions import ex, AuthException
from sickbeard.helpers import sanitizeSceneName
from sickbeard.bs4_parser import BS4Parser
from unidecode import unidecode
@@ -75,6 +75,12 @@ class BitSoupProvider(generic.TorrentProvider):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {'username': self.username,
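
BitSoup (and, in later hunks, FreshOnTV, HDTorrents and IPTorrents) gains an explicit credential check that raises instead of attempting a login with empty credentials. Standalone version, with AuthException stubbed in place of sickbeard.exceptions.AuthException:

class AuthException(Exception):
    pass

def check_auth(provider_name, username, password):
    if not username or not password:
        raise AuthException("Your authentication credentials for " +
                            provider_name + " are missing, check your config.")
    return True

try:
    check_auth("BitSoup", None, None)
except AuthException as e:
    print(e)   # surfaces the config problem instead of failing silently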


@@ -315,8 +315,5 @@ class BTNCache(tvcache.TVCache):
return self.provider._doSearch(search_params=None, age=seconds_since_last_update)
def _checkAuth(self, data):
return self.provider._checkAuthFromData(data)
provider = BTNProvider()


@@ -1,141 +0,0 @@
# Author: Harm van Tilborg <harm@zeroxcool.net>
# URL: https://github.com/hvt/Sick-Beard/tree/dtt
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.helpers import sanitizeSceneName
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex
class DTTProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "DailyTvTorrents")
self.supportsBacklog = True
self.enabled = False
self.ratio = None
self.cache = DTTCache(self)
self.url = 'http://www.dailytvtorrents.org/'
def isEnabled(self):
return self.enabled
def imageName(self):
return 'dailytvtorrents.gif'
def getQuality(self, item, anime=False):
url = item.enclosures[0].href
quality = Quality.sceneQuality(url)
return quality
def findSearchResults(self, show, season, episodes, search_mode, manualSearch=False):
return generic.TorrentProvider.findSearchResults(self, show, season, episodes, search_mode, manualSearch)
def _dtt_show_id(self, show_name):
return sanitizeSceneName(show_name).replace('.', '-').lower()
def _get_season_search_strings(self, ep_obj):
search_string = []
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
show_string = sanitizeSceneName(show_name).replace('.', '-').lower()
search_string.append(show_string)
return search_string
def _get_episode_search_strings(self, ep_obj, add_string=''):
return self._get_season_search_strings(ep_obj)
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
# show_id = self._dtt_show_id(self.show.name)
params = {"items": "all"}
if sickbeard.DTT_NORAR:
params.update({"norar": "yes"})
if sickbeard.DTT_SINGLE:
params.update({"single": "yes"})
searchURL = self.url + "rss/show/" + search_params + "?" + urllib.urlencode(params)
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.cache.getRSSFeed(searchURL)
if not data:
return []
try:
items = data.entries
except Exception, e:
logger.log(u"Error trying to load DTT RSS feed: " + ex(e), logger.ERROR)
logger.log(u"RSS data: " + data, logger.DEBUG)
return []
results = []
for curItem in items:
(title, url) = self._get_title_and_url(curItem)
results.append(curItem)
return results
def _get_title_and_url(self, item):
title = item.title
if title:
title = u'' + title
title = title.replace(' ', '.')
url = item.enclosures[0].href
return (title, url)
class DTTCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll DTT every 30 minutes max
self.minTime = 30
def _getDailyData(self):
params = {"items": "all"}
if sickbeard.DTT_NORAR:
params.update({"norar": "yes"})
if sickbeard.DTT_SINGLE:
params.update({"single": "yes"})
url = self.provider.url + 'rss/allshows?' + urllib.urlencode(params)
logger.log(u"DTT cache update URL: " + url, logger.DEBUG)
return self.getRSSFeed(url).entries
provider = DTTProvider()


@@ -179,7 +179,11 @@ class EZRSSCache(tvcache.TVCache):
rss_url = self.provider.url + 'feed/'
logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
return self.getRSSFeed(rss_url).entries
data = self.getRSSFeed(rss_url)
if data and 'entries' in data:
return data.entries
else:
return []
provider = EZRSSProvider()


@@ -139,7 +139,12 @@ class FanzubCache(tvcache.TVCache):
logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG)
return self.getRSSFeed(rss_url).entries
data = self.getRSSFeed(rss_url)
if data and 'entries' in data:
return data.entries
else:
return []
provider = Fanzub()


@@ -29,7 +29,7 @@ from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex
from sickbeard.exceptions import ex, AuthException
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
@@ -78,6 +78,13 @@ class FreshOnTVProvider(generic.TorrentProvider):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
return True
@@ -301,6 +308,6 @@ class FreshOnTVCache(tvcache.TVCache):
def _getDailyData(self):
search_params = {'RSS': ['']}
return self.provider._doSearch(search_params).entries
return self.provider._doSearch(search_params)
provider = FreshOnTVProvider()


@@ -75,7 +75,7 @@ class GenericProvider:
return self.getID() + '.png'
def _checkAuth(self):
return
return True
def _doLogin(self):
return True
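
Subtle but load-bearing: the base _checkAuth used to fall off the end and return None, and the reworked TVCache.updateCache() (tvcache.py hunk near the end of this diff) now gates on its truthiness, so providers without an override would never have updated. Hence:

def check_auth_old():
    return           # implicit None, which is falsy

def check_auth_new():
    return True      # truthy, so updateCache() proceeds

assert not check_auth_old()   # cache update would have been skipped
assert check_auth_new()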


@@ -67,9 +67,6 @@ class HDBitsProvider(generic.TorrentProvider):
def _checkAuthFromData(self, parsedJSON):
if parsedJSON is None:
return self._checkAuth()
if 'status' in parsedJSON and 'message' in parsedJSON:
if parsedJSON.get('status') == 5:
logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['message'],
@@ -207,52 +204,17 @@ class HDBitsCache(tvcache.TVCache):
# only poll HDBits every 15 minutes max
self.minTime = 15
def updateCache(self):
def _getDailyData(self):
parsedJSON = self.provider.getURL(self.provider.rss_url, post_data=self.provider._make_post_data_JSON(), json=True)
# delete anything older then 7 days
self._clearCache()
if not self.provider._checkAuthFromData(parsedJSON):
return []
if not self.shouldUpdate():
return
if self._checkAuth(None):
parsedJSON = self._getRSSData()
if not parsedJSON:
logger.log(u"Error trying to load " + self.provider.name + " JSON feed", logger.ERROR)
return []
# mark updated
self.setLastUpdate()
if self._checkAuth(parsedJSON):
if parsedJSON and 'data' in parsedJSON:
items = parsedJSON['data']
else:
logger.log(u"Resulting JSON from " + self.provider.name + " isn't correct, not parsing it",
logger.ERROR)
return []
cl = []
for item in items:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
else:
raise exceptions.AuthException(
"Your authentication info for " + self.provider.name + " is incorrect, check your config")
if parsedJSON and 'data' in parsedJSON:
return parsedJSON['data']
else:
return []
def _getRSSData(self):
return self.provider.getURL(self.provider.rss_url, post_data=self.provider._make_post_data_JSON(), json=True)
def _checkAuth(self, data):
return self.provider._checkAuthFromData(data)
provider = HDBitsProvider()
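
The HDBits cache drops its private copy of updateCache(): _getDailyData() now just fetches and validates the JSON feed, and the shared TVCache.updateCache() handles timing, parsing and DB writes. The surviving logic, with the fetch and auth check as stand-in callables:

def get_daily_data(fetch_json, auth_ok):
    # fetch_json: the provider's JSON POST, as in the diff
    # auth_ok: provider._checkAuthFromData
    parsedJSON = fetch_json()
    if not auth_ok(parsedJSON):
        return []
    if parsedJSON and 'data' in parsedJSON:
        return parsedJSON['data']
    return []

assert get_daily_data(lambda: {'data': [1]}, lambda j: True) == [1]
assert get_daily_data(lambda: None, lambda j: True) == []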


@@ -30,7 +30,7 @@ from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex
from sickbeard.exceptions import ex, AuthException
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
@@ -82,6 +82,13 @@ class HDTorrentsProvider(generic.TorrentProvider):
quality = Quality.sceneQuality(item[0])
return quality
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):


@@ -29,7 +29,7 @@ from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex
from sickbeard.exceptions import ex, AuthException
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
@@ -74,6 +74,13 @@ class IPTorrentsProvider(generic.TorrentProvider):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {'username': self.username,


@@ -1,340 +0,0 @@
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sys
import time
import urllib, urlparse
from datetime import datetime, timedelta
import sickbeard
import generic
import sickbeard.encodingKludge as ek
from sickbeard import classes, logger, helpers, exceptions, show_name_helpers
from sickbeard import tvcache
from sickbeard.common import Quality
from sickbeard.exceptions import ex
from lib.dateutil.parser import parse as parseDate
class NewzbinDownloader(urllib.FancyURLopener):
def __init__(self):
urllib.FancyURLopener.__init__(self)
def http_error_default(self, url, fp, errcode, errmsg, headers):
# if newzbin is throttling us, wait seconds and try again
if errcode == 400:
newzbinErrCode = int(headers.getheader('X-DNZB-RCode'))
if newzbinErrCode == 450:
rtext = str(headers.getheader('X-DNZB-RText'))
result = re.search("wait (\d+) seconds", rtext)
logger.log("Newzbin throttled our NZB downloading, pausing for " + result.group(1) + "seconds")
time.sleep(int(result.group(1)))
raise exceptions.NewzbinAPIThrottled()
elif newzbinErrCode == 401:
raise exceptions.AuthException("Newzbin username or password incorrect")
elif newzbinErrCode == 402:
raise exceptions.AuthException("Newzbin account not premium status, can't download NZBs")
class NewzbinProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "Newzbin")
self.supportsBacklog = True
self.cache = NewzbinCache(self)
self.url = 'https://www.newzbin2.es/'
self.NEWZBIN_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S %Z'
def isEnabled(self):
return sickbeard.NEWZBIN
def getQuality(self, item, anime=False):
attributes = item.report[0]
attr_dict = {}
for attribute in attributes.getElementsByTagName('report:attribute'):
cur_attr = attribute.getAttribute('type')
cur_attr_value = helpers.get_xml_text(attribute)
if cur_attr not in attr_dict:
attr_dict[cur_attr] = [cur_attr_value]
else:
attr_dict[cur_attr].append(cur_attr_value)
logger.log("Finding quality of item based on attributes " + str(attr_dict), logger.DEBUG)
if self._is_SDTV(attr_dict):
quality = Quality.SDTV
elif self._is_SDDVD(attr_dict):
quality = Quality.SDDVD
elif self._is_HDTV(attr_dict):
quality = Quality.HDTV
elif self._is_WEBDL(attr_dict):
quality = Quality.HDWEBDL
elif self._is_720pBluRay(attr_dict):
quality = Quality.HDBLURAY
elif self._is_1080pBluRay(attr_dict):
quality = Quality.FULLHDBLURAY
else:
quality = Quality.UNKNOWN
logger.log("Resulting quality: " + str(quality), logger.DEBUG)
return quality
def _is_SDTV(self, attrs):
# Video Fmt: (XviD, DivX, H.264/x264), NOT 720p, NOT 1080p, NOT 1080i
video_fmt = 'Video Fmt' in attrs and (
'XviD' in attrs['Video Fmt'] or 'DivX' in attrs['Video Fmt'] or 'H.264/x264' in attrs['Video Fmt']) \
and ('720p' not in attrs['Video Fmt']) \
and ('1080p' not in attrs['Video Fmt']) \
and ('1080i' not in attrs['Video Fmt'])
# Source: TV Cap or HDTV or (None)
source = 'Source' not in attrs or 'TV Cap' in attrs['Source'] or 'HDTV' in attrs['Source']
# Subtitles: (None)
subs = 'Subtitles' not in attrs
return video_fmt and source and subs
def _is_SDDVD(self, attrs):
# Video Fmt: (XviD, DivX, H.264/x264), NOT 720p, NOT 1080p, NOT 1080i
video_fmt = 'Video Fmt' in attrs and (
'XviD' in attrs['Video Fmt'] or 'DivX' in attrs['Video Fmt'] or 'H.264/x264' in attrs['Video Fmt']) \
and ('720p' not in attrs['Video Fmt']) \
and ('1080p' not in attrs['Video Fmt']) \
and ('1080i' not in attrs['Video Fmt'])
# Source: DVD
source = 'Source' in attrs and 'DVD' in attrs['Source']
# Subtitles: (None)
subs = 'Subtitles' not in attrs
return video_fmt and source and subs
def _is_HDTV(self, attrs):
# Video Fmt: H.264/x264, 720p
video_fmt = 'Video Fmt' in attrs and ('H.264/x264' in attrs['Video Fmt']) \
and ('720p' in attrs['Video Fmt'])
# Source: TV Cap or HDTV or (None)
source = 'Source' not in attrs or 'TV Cap' in attrs['Source'] or 'HDTV' in attrs['Source']
# Subtitles: (None)
subs = 'Subtitles' not in attrs
return video_fmt and source and subs
def _is_WEBDL(self, attrs):
# Video Fmt: H.264/x264, 720p
video_fmt = 'Video Fmt' in attrs and ('H.264/x264' in attrs['Video Fmt']) \
and ('720p' in attrs['Video Fmt'])
# Source: WEB-DL
source = 'Source' in attrs and 'WEB-DL' in attrs['Source']
# Subtitles: (None)
subs = 'Subtitles' not in attrs
return video_fmt and source and subs
def _is_720pBluRay(self, attrs):
# Video Fmt: H.264/x264, 720p
video_fmt = 'Video Fmt' in attrs and ('H.264/x264' in attrs['Video Fmt']) \
and ('720p' in attrs['Video Fmt'])
# Source: Blu-ray or HD-DVD
source = 'Source' in attrs and ('Blu-ray' in attrs['Source'] or 'HD-DVD' in attrs['Source'])
return video_fmt and source
def _is_1080pBluRay(self, attrs):
# Video Fmt: H.264/x264, 1080p
video_fmt = 'Video Fmt' in attrs and ('H.264/x264' in attrs['Video Fmt']) \
and ('1080p' in attrs['Video Fmt'])
# Source: Blu-ray or HD-DVD
source = 'Source' in attrs and ('Blu-ray' in attrs['Source'] or 'HD-DVD' in attrs['Source'])
return video_fmt and source
def getIDFromURL(self, url):
id_regex = re.escape(self.url) + 'browse/post/(\d+)/'
id_match = re.match(id_regex, url)
if not id_match:
return None
else:
return id_match.group(1)
def downloadResult(self, nzb):
id = self.getIDFromURL(nzb.url)
if not id:
logger.log("Unable to get an ID from " + str(nzb.url) + ", can't download from Newzbin's API", logger.ERROR)
return False
logger.log("Downloading an NZB from newzbin with id " + id)
fileName = ek.ek(os.path.join, sickbeard.NZB_DIR, helpers.sanitizeFileName(nzb.name) + '.nzb')
logger.log("Saving to " + fileName)
urllib._urlopener = NewzbinDownloader()
params = urllib.urlencode(
{"username": sickbeard.NEWZBIN_USERNAME, "password": sickbeard.NEWZBIN_PASSWORD, "reportid": id})
try:
urllib.urlretrieve(self.url + "api/dnzb/", fileName, data=params)
except exceptions.NewzbinAPIThrottled:
logger.log("Done waiting for Newzbin API throttle limit, starting downloads again")
self.downloadResult(nzb)
except (urllib.ContentTooShortError, IOError), e:
logger.log("Error downloading NZB: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
return False
return True
def _get_season_search_strings(self, ep_obj):
return ['^' + x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
def _get_episode_search_strings(self, ep_obj, add_string=''):
return ['^' + x for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
def _doSearch(self, searchStr, show=None, age=None):
data = self._getRSSData(searchStr.encode('utf-8'))
item_list = []
try:
items = data.entries
except Exception, e:
logger.log("Error trying to load Newzbin RSS feed: " + ex(e), logger.ERROR)
return []
for cur_item in items:
title = cur_item.title
if title == 'Feeds Error':
raise exceptions.AuthException("The feed wouldn't load, probably because of invalid auth info")
if sickbeard.USENET_RETENTION is not None:
try:
dateString = helpers.get_xml_text(cur_item.getElementsByTagName('report:postdate')[0])
# use the parse (imported as parseDate) function from the dateutil lib
# and we have to remove the timezone info from it because the retention_date will not have one
# and a comparison of them is not possible
post_date = parseDate(dateString).replace(tzinfo=None)
retention_date = datetime.now() - timedelta(days=sickbeard.USENET_RETENTION)
if post_date < retention_date:
logger.log(u"Date " + str(post_date) + " is out of retention range, skipping", logger.DEBUG)
continue
except Exception, e:
logger.log("Error parsing date from Newzbin RSS feed: " + str(e), logger.ERROR)
continue
item_list.append(cur_item)
return item_list
def _getRSSData(self, search=None):
params = {
'searchaction': 'Search',
'fpn': 'p',
'category': 8,
'u_nfo_posts_only': 0,
'u_url_posts_only': 0,
'u_comment_posts_only': 0,
'u_show_passworded': 0,
'u_v3_retention': 0,
'ps_rb_video_format': 3082257,
'ps_rb_language': 4096,
'sort': 'date',
'order': 'desc',
'u_post_results_amt': 50,
'feed': 'rss',
'hauth': 1,
}
if search:
params['q'] = search + " AND "
else:
params['q'] = ''
params['q'] += 'Attr:Lang~Eng AND NOT Attr:VideoF=DVD'
url = self.url + "search/?%s" % urllib.urlencode(params)
logger.log("Newzbin search URL: " + url, logger.DEBUG)
return self.cache.getRSSFeed(url)
def _checkAuth(self):
if sickbeard.NEWZBIN_USERNAME in (None, "") or sickbeard.NEWZBIN_PASSWORD in (None, ""):
raise exceptions.AuthException("Newzbin authentication details are empty, check your config")
class NewzbinCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll Newzbin every 10 mins max
self.minTime = 1
def _getDailyData(self):
return self.provider._getRSSData().entries
def _parseItem(self, item):
(title, url) = self.provider._get_title_and_url(item)
if title == 'Feeds Error':
logger.log("There's an error in the feed, probably bad auth info", logger.DEBUG)
raise exceptions.AuthException("Invalid Newzbin username/password")
if not title or not url:
logger.log(
"The XML returned from the " + self.provider.name + " feed is incomplete, this result is unusable",
logger.ERROR)
return
logger.log(u"RSS Feed provider: [" + self.provider.name + "] Attempting to add item to cache: " + title, logger.DEBUG)
self._addCacheEntry(title, url)
provider = NewzbinProvider()


@@ -137,7 +137,12 @@ class NyaaCache(tvcache.TVCache):
logger.log(u"NyaaTorrents cache update URL: " + url, logger.DEBUG)
return self.getRSSFeed(url).entries
data = self.getRSSFeed(url)
if data and 'entries' in data:
return data.entries
else:
return []
provider = NyaaProvider()


@@ -1,162 +0,0 @@
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import re
import time
import urllib
from xml.dom.minidom import parseString
import sickbeard
import generic
from sickbeard import classes, show_name_helpers, helpers
from sickbeard import exceptions, logger
from sickbeard import tvcache
from sickbeard.exceptions import ex
class NZBsProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "NZBs.org Old")
self.supportsBacklog = True
self.cache = NZBsCache(self)
self.url = 'https://secure.nzbs.org/'
def isEnabled(self):
return sickbeard.NZBS
def _checkAuth(self):
if sickbeard.NZBS_UID in (None, "") or sickbeard.NZBS_HASH in (None, ""):
raise exceptions.AuthException("NZBs.org authentication details are empty, check your config")
def _get_season_search_strings(self, ep_obj):
return ['^' + x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
def _get_episode_search_strings(self, ep_obj, add_string=''):
return ['^' + x for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
def _doSearch(self, curString, show=None, age=None):
curString = curString.replace('.', ' ')
params = {"action": "search",
"q": curString.encode('utf-8'),
"dl": 1,
"i": sickbeard.NZBS_UID,
"h": sickbeard.NZBS_HASH,
"age": sickbeard.USENET_RETENTION,
"num": 100,
"type": 1}
searchURL = self.url + "rss.php?" + urllib.urlencode(params)
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.cache.getRSSFeed(searchURL)
# Pause to avoid 503's
time.sleep(5)
if data is None:
logger.log(u"Error trying to load NZBs.org RSS feed: " + searchURL, logger.ERROR)
return []
items = data.entries
results = []
for curItem in items:
(title, url) = self._get_title_and_url(curItem)
if not title or not url:
logger.log(
u"The XML returned from the NZBs.org RSS feed is incomplete, this result is unusable: " + data,
logger.ERROR)
continue
if "&i=" not in url and "&h=" not in url:
raise exceptions.AuthException(
"The NZBs.org result URL has no auth info which means your UID/hash are incorrect, check your config")
results.append(curItem)
return results
def findPropers(self, date=None):
results = []
for curString in (".PROPER.", ".REPACK."):
for curResult in self._doSearch(curString):
(title, url) = self._get_title_and_url(curResult)
pubDate_node = curResult.getElementsByTagName('pubDate')[0]
pubDate = helpers.get_xml_text(pubDate_node)
match = re.search('(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', pubDate)
if not match:
continue
resultDate = datetime.datetime.strptime(match.group(1), "%a, %d %b %Y %H:%M:%S")
if date is None or resultDate > date:
results.append(classes.Proper(title, url, resultDate, self.show))
return results
class NZBsCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll NZBs.org every 15 minutes max
self.minTime = 15
def _getRSSData(self):
url = self.provider.url + 'rss.php?'
urlArgs = {'type': 1,
'dl': 1,
'num': 100,
'i': sickbeard.NZBS_UID,
'h': sickbeard.NZBS_HASH,
'age': sickbeard.USENET_RETENTION}
url += urllib.urlencode(urlArgs)
logger.log(u"NZBs cache update URL: " + url, logger.DEBUG)
return self.provider.getURL(url)
def _checkItemAuth(self, title, url):
if "&i=" not in url and "&h=" not in url:
raise exceptions.AuthException(
"The NZBs.org result URL has no auth info which means your UID/hash are incorrect, check your config")
provider = NZBsProvider()


@@ -1,116 +0,0 @@
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib
import generic
import sickbeard
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from sickbeard import exceptions, logger
from sickbeard import tvcache, show_name_helpers
class NZBsRUSProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "NZBs'R'US")
self.cache = NZBsRUSCache(self)
self.url = 'https://www.nzbsrus.com/'
self.supportsBacklog = True
def isEnabled(self):
return sickbeard.NZBSRUS
def _checkAuth(self):
if sickbeard.NZBSRUS_UID in (None, "") or sickbeard.NZBSRUS_HASH in (None, ""):
raise exceptions.AuthException("NZBs'R'US authentication details are empty, check your config")
def _get_season_search_strings(self, ep_obj):
return ['^' + x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
def _get_episode_search_strings(self, ep_obj, add_string=''):
return ['^' + x for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
def _doSearch(self, search, show=None, age=None):
params = {'uid': sickbeard.NZBSRUS_UID,
'key': sickbeard.NZBSRUS_HASH,
'xml': 1,
'age': sickbeard.USENET_RETENTION,
'lang0': 1, # English only from CouchPotato
'lang1': 1,
'lang3': 1,
'c91': 1, # TV:HD
'c104': 1, # TV:SD-x264
'c75': 1, # TV:XviD
'searchtext': search}
if not params['age']:
params['age'] = 500
searchURL = self.url + 'api.php?' + urllib.urlencode(params)
logger.log(u"NZBS'R'US search url: " + searchURL, logger.DEBUG)
data = self.cache.getRSSFeed(searchURL)
if not data:
return []
items = data.entries
if not len(items) > 0:
logger.log(u"Error trying to parse NZBS'R'US XML data.", logger.ERROR)
logger.log(u"RSS data: " + data, logger.DEBUG)
return []
return items
def _get_title_and_url(self, item):
if item.title: # RSS feed
title = item.title
url = item.link
else: # API item
title = item.name
nzbID = item.id
key = item.key
url = self.url + 'nzbdownload_rss.php' + '/' + \
nzbID + '/' + sickbeard.NZBSRUS_UID + '/' + key + '/'
return (title, url)
class NZBsRUSCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll NZBs'R'US every 15 minutes max
self.minTime = 15
def _getDailyData(self):
url = self.provider.url + 'rssfeed.php?'
urlArgs = {'cat': '91,75,104', # HD,XviD,SD-x264
'i': sickbeard.NZBSRUS_UID,
'h': sickbeard.NZBSRUS_HASH}
url += urllib.urlencode(urlArgs)
logger.log(u"NZBs'R'US cache update URL: " + url, logger.DEBUG)
return self.getRSSFeed(url).entries
def _checkAuth(self, data):
return data != 'Invalid Link'
provider = NZBsRUSProvider()


@@ -164,9 +164,11 @@ class OmgwtfnzbsCache(tvcache.TVCache):
logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG)
return self.getRSSFeed(rss_url).entries
data = self.getRSSFeed(rss_url)
if data and 'entries' in data:
return data.entries
else:
return []
def _checkAuth(self, data):
return self.provider._checkAuthFromData(data)
provider = OmgwtfnzbsProvider()


@@ -1,253 +0,0 @@
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import sys
import os
import traceback
import urllib, urlparse
import re
import datetime
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import db
from sickbeard import classes
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
class PublicHDProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "PublicHD")
self.supportsBacklog = True
self.enabled = False
self.ratio = None
self.minseed = None
self.minleech = None
self.cache = PublicHDCache(self)
self.url = 'http://phdproxy.com/'
self.searchurl = self.url + 'index.php?page=torrents&search=%s&active=0&category=%s&order=5&by=2' #order by seed
self.categories = {'Season': ['23'], 'Episode': ['7', '14', '24'], 'RSS': ['7', '14', '23', '24']}
def isEnabled(self):
return self.enabled
def imageName(self):
return 'publichd.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + str(ep_obj.airdate).split('-')[0]
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName SXX -SXXE
search_string['Season'].append(ep_string)
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' Season ' + str(ep_obj.airdate).split('-')[0]
else:
ep_string = show_name + ' Season ' + str(ep_obj.scene_season) #2) showName Season X
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
if self.show.air_by_date:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
else:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
for x in add_string.split('|'):
to_search = re.sub('\s+', ' ', ep_string + ' %s' % x)
search_string['Episode'].append(to_search)
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_params.keys():
for search_string in search_params[mode]:
if mode == 'RSS':
searchURL = self.url + 'index.php?page=torrents&active=1&category=%s' % (
';'.join(self.categories[mode]))
logger.log(u"PublicHD cache update URL: " + searchURL, logger.DEBUG)
else:
searchURL = self.searchurl % (
urllib.quote(unidecode(search_string)), ';'.join(self.categories[mode]))
logger.log(u"Search string: " + searchURL, logger.DEBUG)
html = self.getURL(searchURL)
if not html:
continue
#remove unneccecary <option> lines which are slowing down BeautifulSoup
optreg = re.compile(r'<option.*</option>')
html = os.linesep.join([s for s in html.splitlines() if not optreg.search(s)])
try:
with BS4Parser(html, features=["html5lib", "permissive"]) as html:
torrent_table = html.find('table', attrs={'id': 'torrbg'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
#Continue only if one Release is found
if len(torrent_rows) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.DEBUG)
continue
for tr in torrent_rows[1:]:
try:
link = self.url + tr.find(href=re.compile('page=torrent-details'))['href']
title = tr.find(lambda x: x.has_attr('title')).text.replace('_', '.')
url = tr.find(href=re.compile('magnet+'))['href']
seeders = int(tr.find_all('td', {'class': 'header'})[4].text)
leechers = int(tr.find_all('td', {'class': 'header'})[5].text)
except (AttributeError, TypeError):
continue
if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
continue
if not title or not url:
continue
item = title, url, link, seeders, leechers
items[mode].append(item)
except Exception, e:
logger.log(u"Failed to parsing " + self.name + " Traceback: " + traceback.format_exc(),
logger.ERROR)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if title:
title = u'' + title
title = title.replace(' ', '.')
if url:
url = url.replace('&amp;', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class PublicHDCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll ThePirateBay every 10 minutes max
self.minTime = 20
def _getDailyData(self):
search_params = {'RSS': ['rss']}
return self.provider._doSearch(search_params)
provider = PublicHDProvider()


@@ -157,6 +157,11 @@ class TorrentRssCache(tvcache.TVCache):
request_headers = None
if self.provider.cookies:
request_headers = { 'Cookie': self.provider.cookies }
return self.getRSSFeed(self.provider.url, request_headers=request_headers).entries
data = self.getRSSFeed(self.provider.url, request_headers=request_headers)
if data and 'entries' in data:
return data.entries
else:
return []


@@ -196,7 +196,7 @@ class ThePirateBayProvider(generic.TorrentProvider):
if self.show.air_by_date:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|')
str(ep_obj.airdate).replace('-', ' ')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(allPossibleShowNames(self.show)):


@@ -45,6 +45,7 @@ class TorrentLeechProvider(generic.TorrentProvider):
'detail': 'https://torrentleech.org/torrent/%s',
'search': 'https://torrentleech.org/torrents/browse/index/query/%s/categories/%s',
'download': 'https://torrentleech.org%s',
'index': 'https://torrentleech.org/torrents/browse/index/categories/%s',
}
def __init__(self):
@@ -163,7 +164,10 @@ class TorrentLeechProvider(generic.TorrentProvider):
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
searchURL = self.urls['search'] % (search_string, self.categories)
if mode == 'RSS':
searchURL = self.urls['index'] % self.categories
else:
searchURL = self.urls['search'] % (search_string, self.categories)
logger.log(u"Search string: " + searchURL, logger.DEBUG)
@@ -203,7 +207,7 @@ class TorrentLeechProvider(generic.TorrentProvider):
continue
item = title, download_url, id, seeders, leechers
logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
logger.log(u"Found result: " + title + "(" + download_url + ")", logger.DEBUG)
items[mode].append(item)


@@ -61,11 +61,8 @@ class TvTorrentsProvider(generic.TorrentProvider):
return True
def _checkAuthFromData(self, data):
if data is None or data.feed is None:
return self._checkAuth()
description_text = data.feed.title
if data.feed.title:
description_text = data.feed.title
if "User can't be found" in description_text or "Invalid Hash" in description_text:
logger.log(u"Incorrect authentication credentials for " + self.name + " : " + str(description_text),
@@ -93,10 +90,16 @@ class TvTorrentsCache(tvcache.TVCache):
rss_url = self.provider.url + 'RssServlet?digest=' + provider.digest + '&hash=' + provider.hash + '&fname=true&exclude=(' + ignore_regex + ')'
logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG)
return self.getRSSFeed(rss_url)
data = self.getRSSFeed(rss_url)
if not self.provider._checkAuthFromData(data):
return []
if data and 'entries' in data:
return data['entries']
else:
return []
def _checkAuth(self, data):
return self.provider._checkAuthFromData(data)
provider = TvTorrentsProvider()


@@ -177,7 +177,7 @@ def filter_release_name(name, filter_words):
Returns: False if the release name is OK, True if it contains one of the filter_words
"""
if filter_words:
filters = [re.compile('(^|[\W_])%s($|[\W_])' % filter.strip(), re.I) for filter in filter_words.split(',')]
filters = [re.compile('.*%s.*' % filter.strip(), re.I) for filter in filter_words.split(',')]
for regfilter in filters:
if regfilter.search(name):
logger.log(u"" + name + " contains pattern: " + regfilter.pattern, logger.DEBUG)


@@ -105,15 +105,15 @@ class TVCache():
def _getDailyData(self):
return None
def _checkAuth(self, data):
return True
def _checkAuth(self):
return self.provider._checkAuth()
def _checkItemAuth(self, title, url):
return True
def updateCache(self):
if self.shouldUpdate() and self._checkAuth(None):
if self.shouldUpdate() and self._checkAuth():
# as long as the http request worked we count this as an update
data = self._getDailyData()
if not data:
@@ -126,20 +126,16 @@
self.setLastUpdate()
# parse data
if self._checkAuth(data):
cl = []
for item in data:
title, url = self.provider._get_title_and_url(item)
ci = self._parseItem(title, url)
if ci is not None:
cl.append(ci)
cl = []
for item in data:
title, url = self.provider._get_title_and_url(item)
ci = self._parseItem(title, url)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
else:
raise AuthException(
u"Your authentication credentials for " + self.provider.name + " are incorrect, check your config")
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
return []
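
After this cleanup, updateCache() authenticates once up front, fetches via _getDailyData(), and mass-inserts whatever parses; the old nested post-fetch auth branch and its AuthException are gone. A skeleton of the resulting flow (DB write reduced to a list, timing checks elided):

class TVCacheSketch(object):
    def __init__(self, items):
        self._items = items

    def _checkAuth(self):
        return True               # the real class delegates to the provider

    def _getDailyData(self):
        return self._items        # the real class hits the provider feed

    def updateCache(self):
        if not self._checkAuth():     # auth gate before any work
            return []
        cl = []
        for title, url in self._getDailyData():
            if title and url:         # stands in for _parseItem()
                cl.append((title, url))
        return cl                     # real code: myDB.mass_action(cl)

assert TVCacheSketch([('Title', 'http://x/1')]).updateCache() == [('Title', 'http://x/1')]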


@@ -758,4 +758,4 @@ class SourceUpdateManager(UpdateManager):
def list_remote_branches(self):
gh = github.GitHub(self.github_repo_user, self.github_repo, self.branch)
return [x.name for x in gh.branches() if x]
return [x['name'] for x in gh.branches() if x and 'name' in x]
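
gh.branches() now hands back parsed JSON, i.e. plain dicts, so attribute access gives way to a guarded key lookup:

branches = [{'name': 'master'}, {'name': 'dev'}, {}]

# x.name would raise AttributeError on a dict; the guarded x['name']
# also skips entries missing the key
names = [x['name'] for x in branches if x and 'name' in x]
assert names == ['master', 'dev']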


@@ -1601,7 +1601,7 @@ class ConfigSearch(MainHandler):
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_host=None, nzbget_username=None, nzbget_password=None,
nzbget_category=None, nzbget_host=None, nzbget_use_https=None, dailysearch_frequency=None,
nzbget_category=None, nzbget_priority=100, nzbget_host=None, nzbget_use_https=None, dailysearch_frequency=None,
nzb_method=None, torrent_method=None, usenet_retention=None, backlog_frequency=None,
download_propers=None, check_propers_interval=None, allow_high_priority=None,
backlog_startup=None, dailysearch_startup=None,
@@ -1648,6 +1648,7 @@
sickbeard.NZBGET_CATEGORY = nzbget_category
sickbeard.NZBGET_HOST = config.clean_host(nzbget_host)
sickbeard.NZBGET_USE_HTTPS = config.checkbox_to_value(nzbget_use_https)
sickbeard.NZBGET_PRIORITY = int(nzbget_priority)
sickbeard.TORRENT_USERNAME = torrent_username
sickbeard.TORRENT_PASSWORD = torrent_password
@@ -2362,7 +2363,7 @@ class ConfigNotifications(MainHandler):
sickbeard.TRAKT_REMOVE_WATCHLIST = config.checkbox_to_value(trakt_remove_watchlist)
sickbeard.TRAKT_REMOVE_SERIESLIST = config.checkbox_to_value(trakt_remove_serieslist)
sickbeard.TRAKT_USE_WATCHLIST = config.checkbox_to_value(trakt_use_watchlist)
sickbeard.TRAKT_METHOD_ADD = trakt_method_add
sickbeard.TRAKT_METHOD_ADD = int(trakt_method_add)
sickbeard.TRAKT_START_PAUSED = config.checkbox_to_value(trakt_start_paused)
sickbeard.TRAKT_USE_RECOMMENDED = config.checkbox_to_value(trakt_use_recommended)
sickbeard.TRAKT_SYNC = config.checkbox_to_value(trakt_sync)