diff --git a/CHANGES.md b/CHANGES.md
index faf1a9d2..d905a383 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,6 +1,7 @@
### 0.17.0 (2018-xx-xx xx:xx:xx UTC)
* Update Tornado Web Server 5.0.1 (35a538f) to 5.0.1 (2b2a220a)
+* Add Xspeeds torrent provider
[develop changelog]
diff --git a/sickbeard/providers/__init__.py b/sickbeard/providers/__init__.py
index 111f693b..f7f0858c 100755
--- a/sickbeard/providers/__init__.py
+++ b/sickbeard/providers/__init__.py
@@ -31,7 +31,7 @@ from . import alpharatio, alphareign, beyondhd, bithdtv, bitmetv, blutopia, btn,
iptorrents, limetorrents, magnetdl, morethan, nebulance, ncore, nyaa, pisexy, potuk, pretome, privatehd, ptf, \
rarbg, revtt, scenehd, scenetime, shazbat, showrss, skytorrents, speedcd, \
thepiratebay, torlock, torrentday, torrenting, torrentleech, \
- torrentz2, tvchaosuk, wop, zooqle
+ torrentz2, tvchaosuk, wop, xspeeds, zooqle
# anime
from . import anizb, tokyotoshokan
# custom
@@ -89,6 +89,7 @@ __all__ = ['omgwtfnzbs',
'torrentz2',
'tvchaosuk',
'wop',
+ 'xspeeds',
'zooqle',
'tokyotoshokan',
]
diff --git a/sickbeard/providers/generic.py b/sickbeard/providers/generic.py
index de2b9051..f2dc431e 100644
--- a/sickbeard/providers/generic.py
+++ b/sickbeard/providers/generic.py
@@ -1109,9 +1109,9 @@ class GenericProvider(object):
"""
if not self.should_skip():
str1, thing, str3 = (('', '%s item' % mode.lower(), ''), (' usable', 'proper', ' found'))['Propers' == mode]
- logger.log(u'%s %s in response from %s' % (('No' + str1, count)[0 < count], (
+ logger.log((u'%s %s in response from %s' % (('No' + str1, count)[0 < count], (
'%s%s%s%s' % (('', 'freeleech ')[getattr(self, 'freeleech', False)], thing, maybe_plural(count), str3)),
- re.sub('(\s)\s+', r'\1', url)))
+ re.sub('(\s)\s+', r'\1', url))).replace('%%', '%'))
def check_auth_cookie(self):
diff --git a/sickbeard/providers/xspeeds.py b/sickbeard/providers/xspeeds.py
new file mode 100644
index 00000000..90481b7e
--- /dev/null
+++ b/sickbeard/providers/xspeeds.py
@@ -0,0 +1,209 @@
+# coding=utf-8
+#
+# Author: SickGear
+#
+# This file is part of SickGear.
+#
+# SickGear is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickGear is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import traceback
+
+from . import generic
+from sickbeard import logger
+from sickbeard.bs4_parser import BS4Parser
+from sickbeard.helpers import tryInt, has_anime
+from lib.unidecode import unidecode
+
+
+class XspeedsProvider(generic.TorrentProvider):
+
+ def __init__(self):
+
+ generic.TorrentProvider.__init__(self, 'Xspeeds')
+
+ self.url_base = 'https://www.xspeeds.eu/'
+ self.urls = {'config_provider_home_uri': self.url_base,
+ 'login_action': self.url_base + 'login.php',
+ 'edit': self.url_base + 'usercp.php?act=edit_details',
+ 'search': self.url_base + 'browse.php'}
+
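+        # category ids used on the site when searching; 'Cache' searches cover both the Season and Episode lists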
+ self.categories = {'Season': [94, 21], 'Episode': [91, 74, 54, 20, 47, 16], 'anime': [70]}
+ self.categories['Cache'] = self.categories['Season'] + self.categories['Episode']
+
+ self.url = self.urls['config_provider_home_uri']
+
+ self.username, self.password, self.freeleech, self.minseed, self.minleech = 5 * [None]
+
+ def _authorised(self, **kwargs):
+
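+        # a session is considered logged in when the site's 'c_secure_' cookies are present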
+ return super(XspeedsProvider, self)._authorised(
+ logged_in=(lambda y=None: self.has_all_cookies(pre='c_secure_')), post_params={'form_tmpl': True})
+
+ def _search_provider(self, search_params, **kwargs):
+
+ results = []
+ if not self._authorised():
+ return results
+
+ items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}
+
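+        # case-insensitive regexes for locating detail links, download links and the freeleech marker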
+ rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download', 'fl': 'free'}.items())
+ for mode in search_params.keys():
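+            # adjust the account's category settings for this mode; _set_categories returns what is needed to restore them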
+ save_url, restore = self._set_categories(mode)
+ if self.should_skip():
+ return results
+ for search_string in search_params[mode]:
+ search_string = search_string.replace(u'£', '%')
+ search_string = re.sub('[\s\.]+', '%', search_string)
+ search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
+
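+                # browse.php is searched via POST using the site's quick search parameters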
+ kwargs = dict(post_data={'keywords': search_string, 'do': 'quick_sort', 'page': '0',
+ 'category': '0', 'search_type': 't_name', 'sort': 'added',
+ 'order': 'desc', 'daysprune': '-1'})
+
+ html = self.get_url(self.urls['search'], **kwargs)
+ if self.should_skip():
+ return results
+
+ cnt = len(items[mode])
+ try:
+ if not html or self._has_no_results(html):
+ raise generic.HaltParseException
+
+ with BS4Parser(html, 'html.parser') as soup:
+ torrent_table = soup.find('table', id='sortabletable')
+ torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')
+ get_detail = True
+
+ if 2 > len(torrent_rows):
+ raise generic.HaltParseException
+
+ head = None
+ for tr in torrent_rows[1:]:
+ cells = tr.find_all('td')
+ if 6 > len(cells):
+ continue
+ try:
+ head = head if None is not head else self._header_row(tr)
+ seeders, leechers, size = [tryInt(n, n) for n in [
+ cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
+ if self._peers_fail(mode, seeders, leechers) \
+ or self.freeleech and None is cells[1].find('img', title=rc['fl']):
+ continue
+
+ info = tr.find('a', href=rc['info'])
+ title = (tr.find('div', class_='tooltip-content').get_text() or info.get_text()).strip()
+ title = re.findall('(?m)(^[^\r\n]+)', title)[0]
+ download_url = self._link(tr.find('a', href=rc['get'])['href'])
+ except (StandardError, Exception):
+ continue
+
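+                                # a title truncated with '...' in the listing is completed by reading its detail page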
+ if get_detail and title.endswith('...'):
+ try:
+ with BS4Parser(self.get_url('%s%s' % (
+ self.urls['config_provider_home_uri'], info['href'].lstrip('/').replace(
+ self.urls['config_provider_home_uri'], ''))),
+ 'html.parser') as soup_detail:
+ title = soup_detail.find(
+ 'td', class_='thead', attrs={'colspan': '3'}).get_text().strip()
+ title = re.findall('(?m)(^[^\r\n]+)', title)[0]
+ except IndexError:
+ continue
+ except (StandardError, Exception):
+ get_detail = False
+
+ title = self.regulate_title(title)
+ if download_url and title:
+ items[mode].append((title, download_url, seeders, self._bytesizer(size)))
+
+ except generic.HaltParseException:
+ pass
+ except (StandardError, Exception):
+ logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
+
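+                # '%' is escaped as '%%' because the generic logger converts '%%' back to '%' when logging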
+ self._log_search(mode, len(items[mode]) - cnt,
+ ('search string: ' + search_string.replace('%', '%%'), self.name)['Cache' == mode])
+
+ if mode in 'Season' and len(items[mode]):
+ break
+
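+            # restore the category settings that were changed for this search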
+ if save_url:
+ self.get_url(save_url, post_data=restore)
+
+ results = self._sort_seeding(mode, results + items[mode])
+
+ return results
+
+ def _set_categories(self, mode):
+ # set up categories
+ html = self.get_url(self.urls['edit'])
+ if self.should_skip():
+ return None, None
+ try:
+            form = re.findall('(?is).*(<form.*?save.*?</form>)', html)[0]
+ save_url = self._link(re.findall('(?i)action="([^"]+?)"', form)[0])
+            tags = re.findall(r'(?is)(<input[^>]*?name=[\'"][^\'"]+[^>]*)', form)
+ except (StandardError, Exception):
+ return None, None
+
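+        # extract the type, name, value and checked attributes of every input tag found in the form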
+ cats, params = [], {}
+ attrs = [[(re.findall(r'(?is)%s=[\'"]([^\'"]+)' % attr, c) or [''])[0]
+ for attr in ['type', 'name', 'value', 'checked']] for c in tags]
+ for itype, name, value, checked in attrs:
+ if 'cat' == name[0:3] and 'checkbox' == itype.lower():
+ if any(checked):
+ try:
+ cats += [re.findall('(\d+)[^\d]*$', name)[0]]
+ except IndexError:
+ pass
+ elif 'hidden' == itype.lower() or 'nothing' in name or \
+ (itype.lower() in ['checkbox', 'radio'] and any(checked)):
+ params[name] = value
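+        # select elements in the form are examined separately to find their current choices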
+        selects = re.findall('(?is)(<select.*?</select>)', form)
+ for select in selects:
+ name, values, index = None, None, 0
+ try:
+ name = re.findall('(?is)