mirror of
https://github.com/SickGear/SickGear.git
synced 2024-12-01 00:43:37 +00:00
parent
b3246b0049
commit
8d9d62caf5
34 changed files with 964 additions and 553 deletions
18
lib/pysrt/__init__.py
Normal file
18
lib/pysrt/__init__.py
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
from pysrt.srttime import SubRipTime
from pysrt.srtitem import SubRipItem
from pysrt.srtfile import SubRipFile
from pysrt.srtexc import Error, InvalidItem, InvalidTimeString
from pysrt.version import VERSION, VERSION_STRING

# Public API of the package.
# Fixes to the original list: 'SubRipFile' appeared twice while 'SubRipTime'
# was missing, and 'SUPPORT_UTF_32_LE'/'SUPPORT_UTF_32_BE' were listed but
# never defined anywhere in the package, which made
# ``from pysrt import *`` raise AttributeError.
__all__ = [
    'SubRipFile', 'SubRipItem', 'SubRipTime',
    'Error', 'InvalidItem', 'InvalidTimeString',
]

# Re-export the parse-error handling policies for convenience.
ERROR_PASS = SubRipFile.ERROR_PASS
ERROR_LOG = SubRipFile.ERROR_LOG
ERROR_RAISE = SubRipFile.ERROR_RAISE

# Module-level shortcuts to the most common entry points.
open = SubRipFile.open
stream = SubRipFile.stream
from_string = SubRipFile.from_string
|
218
lib/pysrt/commands.py
Normal file
218
lib/pysrt/commands.py
Normal file
|
@ -0,0 +1,218 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# pylint: disable-all
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import codecs
|
||||||
|
import shutil
|
||||||
|
import argparse
|
||||||
|
from textwrap import dedent
|
||||||
|
|
||||||
|
from chardet import detect
|
||||||
|
from pysrt import SubRipFile, SubRipTime, VERSION_STRING
|
||||||
|
|
||||||
|
def underline(string):
    """Wrap *string* in the ANSI escape codes for underlined terminal text."""
    return "\033[4m{0}\033[0m".format(string)
|
||||||
|
|
||||||
|
|
||||||
|
class TimeAwareArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that keeps negative time offsets (e.g. ``-1s500ms``)
    from being mistaken for option flags.

    The first argument that looks like a time representation gets a ``--``
    separator inserted before it, so argparse treats it and everything after
    it as positional values.
    """

    RE_TIME_REPRESENTATION = re.compile(r'^\-?(\d+[hms]{0,2}){1,4}$')

    def parse_args(self, args=None, namespace=None):
        # Honor the stock argparse contract: args=None means sys.argv[1:].
        # The original implementation crashed on enumerate(None) here.
        if args is None:
            args = sys.argv[1:]
        # Work on a copy so the caller's list is not mutated by insert().
        args = list(args)

        time_index = -1
        for index, arg in enumerate(args):
            if self.RE_TIME_REPRESENTATION.match(arg):
                time_index = index
                break

        if time_index >= 0:
            args.insert(time_index, '--')

        return super(TimeAwareArgumentParser, self).parse_args(args, namespace)
|
||||||
|
|
||||||
|
|
||||||
|
class SubRipShifter(object):
    """Implementation of the ``srt`` command-line tool.

    Builds an argparse front end exposing the shift/rate/split/break
    sub-commands and maps each one onto the matching pysrt operation.
    """

    BACKUP_EXTENSION = '.bak'
    # One (digits, unit) pair per time component; empty unit means seconds.
    RE_TIME_STRING = re.compile(r'(\d+)([hms]{0,2})')
    UNIT_RATIOS = {
        'ms': 1,
        '': SubRipTime.SECONDS_RATIO,
        's': SubRipTime.SECONDS_RATIO,
        'm': SubRipTime.MINUTES_RATIO,
        'h': SubRipTime.HOURS_RATIO,
    }
    DESCRIPTION = dedent("""\
        Srt subtitle editor

        It can either shift, split or change the frame rate.
    """)
    TIMESTAMP_HELP = "A timestamp in the form: [-][Hh][Mm]S[s][MSms]"
    SHIFT_EPILOG = dedent("""\

        Examples:
            1 minute and 12 seconds foreward (in place):
            $ srt -i shift 1m12s movie.srt

            half a second foreward:
            $ srt shift 500ms movie.srt > othername.srt

            1 second and half backward:
            $ srt -i shift -1s500ms movie.srt

            3 seconds backward:
            $ srt -i shift -3 movie.srt
    """)
    RATE_EPILOG = dedent("""\

        Examples:
            Convert 23.9fps subtitles to 25fps:
            $ srt -i rate 23.9 25 movie.srt
    """)
    LIMITS_HELP = "Each parts duration in the form: [Hh][Mm]S[s][MSms]"
    SPLIT_EPILOG = dedent("""\

        Examples:
            For a movie in 2 parts with the first part 48 minutes and 18 seconds long:
            $ srt split 48m18s movie.srt
            => creates movie.1.srt and movie.2.srt

            For a movie in 3 parts of 20 minutes each:
            $ srt split 20m 20m movie.srt
            => creates movie.1.srt, movie.2.srt and movie.3.srt
    """)
    FRAME_RATE_HELP = "A frame rate in fps (commonly 23.9 or 25)"
    ENCODING_HELP = dedent("""\
        Change file encoding. Useful for players accepting only latin1 subtitles.
        List of supported encodings: http://docs.python.org/library/codecs.html#standard-encodings
    """)
    BREAK_EPILOG = dedent("""\
        Break lines longer than defined length
    """)
    LENGTH_HELP = "Maximum number of characters per line"

    def __init__(self):
        # Set by create_backup() for in-place edits; None means stdout.
        self.output_file_path = None

    def build_parser(self):
        """Build and return the argparse parser with all sub-commands."""
        parser = TimeAwareArgumentParser(description=self.DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)
        parser.add_argument('-i', '--in-place', action='store_true', dest='in_place',
                            help="Edit file in-place, saving a backup as file.bak (do not works for the split command)")
        parser.add_argument('-e', '--output-encoding', metavar=underline('encoding'), action='store', dest='output_encoding',
                            type=self.parse_encoding, help=self.ENCODING_HELP)
        parser.add_argument('-v', '--version', action='version', version='%%(prog)s %s' % VERSION_STRING)
        subparsers = parser.add_subparsers(title='commands')

        shift_parser = subparsers.add_parser('shift', help="Shift subtitles by specified time offset", epilog=self.SHIFT_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        shift_parser.add_argument('time_offset', action='store', metavar=underline('offset'),
                                  type=self.parse_time, help=self.TIMESTAMP_HELP)
        shift_parser.set_defaults(action=self.shift)

        rate_parser = subparsers.add_parser('rate', help="Convert subtitles from a frame rate to another", epilog=self.RATE_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        rate_parser.add_argument('initial', action='store', type=float, help=self.FRAME_RATE_HELP)
        rate_parser.add_argument('final', action='store', type=float, help=self.FRAME_RATE_HELP)
        rate_parser.set_defaults(action=self.rate)

        split_parser = subparsers.add_parser('split', help="Split a file in multiple parts", epilog=self.SPLIT_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        split_parser.add_argument('limits', action='store', nargs='+', type=self.parse_time, help=self.LIMITS_HELP)
        split_parser.set_defaults(action=self.split)

        break_parser = subparsers.add_parser('break', help="Break long lines", epilog=self.BREAK_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        break_parser.add_argument('length', action='store', type=int, help=self.LENGTH_HELP)
        break_parser.set_defaults(action=self.break_lines)

        parser.add_argument('file', action='store')

        return parser

    def run(self, args):
        """Parse *args* and execute the selected sub-command."""
        self.arguments = self.build_parser().parse_args(args)
        if self.arguments.in_place:
            self.create_backup()
        self.arguments.action()

    def parse_time(self, time_string):
        """Convert '[-][Hh][Mm]S[s][MSms]' into a signed millisecond count."""
        negative = time_string.startswith('-')
        if negative:
            time_string = time_string[1:]
        # Sum each (value, unit) pair weighted by its ratio; a bare number
        # (empty unit) counts as seconds.
        ordinal = sum(int(value) * self.UNIT_RATIOS[unit] for value, unit
                      in self.RE_TIME_STRING.findall(time_string))
        return -ordinal if negative else ordinal

    def parse_encoding(self, encoding_name):
        """Validate the encoding name, raising ArgumentTypeError if unknown."""
        try:
            codecs.lookup(encoding_name)
        except LookupError as error:
            # Exceptions have no `.message` attribute on Python 3 (the
            # original code raised AttributeError here); str() is portable.
            raise argparse.ArgumentTypeError(str(error))
        return encoding_name

    def shift(self):
        """Shift every subtitle by the parsed millisecond offset."""
        self.input_file.shift(milliseconds=self.arguments.time_offset)
        self.input_file.write_into(self.output_file)

    def rate(self):
        """Rescale every timestamp by the final/initial frame-rate ratio."""
        ratio = self.arguments.final / self.arguments.initial
        self.input_file.shift(ratio=ratio)
        self.input_file.write_into(self.output_file)

    def split(self):
        """Write one numbered output file per requested time slice."""
        # Sentinel upper bound: one past the last subtitle's end time.
        limits = [0] + self.arguments.limits + [self.input_file[-1].end.ordinal + 1]
        base_name, extension = os.path.splitext(self.arguments.file)
        for index, (start, end) in enumerate(zip(limits[:-1], limits[1:])):
            file_name = '%s.%s%s' % (base_name, index + 1, extension)
            part_file = self.input_file.slice(ends_after=start, starts_before=end)
            # Re-base each part's timestamps at zero and renumber its items.
            part_file.shift(milliseconds=-start)
            part_file.clean_indexes()
            part_file.save(path=file_name, encoding=self.output_encoding)

    def create_backup(self):
        """Copy the input to file.bak (once), then edit the original in place.

        After this call the backup is read as input and the original path
        becomes the output.
        """
        backup_file = self.arguments.file + self.BACKUP_EXTENSION
        if not os.path.exists(backup_file):
            shutil.copy2(self.arguments.file, backup_file)
        self.output_file_path = self.arguments.file
        self.arguments.file = backup_file

    def break_lines(self):
        """Re-wrap subtitle text at the requested maximum line length."""
        split_re = re.compile(r'(.{,%i})(?:\s+|$)' % self.arguments.length)
        for item in self.input_file:
            # split() alternates separators and captured chunks; the
            # captured line chunks are at the odd positions.
            item.text = '\n'.join(split_re.split(item.text)[1::2])
        self.input_file.write_into(self.output_file)

    @property
    def output_encoding(self):
        # An explicit -e flag wins; otherwise keep the detected input encoding.
        return self.arguments.output_encoding or self.input_file.encoding

    @property
    def input_file(self):
        """Lazily opened SubRipFile for the input path (cached)."""
        if not hasattr(self, '_source_file'):
            with open(self.arguments.file, 'rb') as f:
                content = f.read()
                encoding = detect(content).get('encoding')
                encoding = self.normalize_encoding(encoding)

            self._source_file = SubRipFile.open(self.arguments.file,
                                                encoding=encoding, error_handling=SubRipFile.ERROR_LOG)
        return self._source_file

    @property
    def output_file(self):
        """Lazily opened output stream: the in-place file, or stdout."""
        if not hasattr(self, '_output_file'):
            if self.output_file_path:
                self._output_file = codecs.open(self.output_file_path, 'w+', encoding=self.output_encoding)
            else:
                self._output_file = sys.stdout
        return self._output_file

    def normalize_encoding(self, encoding):
        """Normalize a chardet-style encoding name to pysrt's convention."""
        if encoding is None:
            # chardet returns None when detection fails (e.g. empty file);
            # fall back to utf-8 instead of crashing on .lower().
            return 'utf_8'
        return encoding.lower().replace('-', '_')
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point for the ``srt`` console script."""
    shifter = SubRipShifter()
    shifter.run(sys.argv[1:])


if __name__ == '__main__':
    main()
|
26
lib/pysrt/comparablemixin.py
Normal file
26
lib/pysrt/comparablemixin.py
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
class ComparableMixin(object):
    """Mixin providing all rich comparisons via a single ``_cmpkey()`` hook.

    Subclasses implement ``_cmpkey()``; every comparison operator is then
    derived by comparing the two keys.
    """

    def _compare(self, other, method):
        """Apply *method* to both comparison keys.

        Returns NotImplemented when *other* has no ``_cmpkey`` or the keys
        are not mutually comparable, so Python can try the reflected
        operation instead.
        """
        try:
            return method(self._cmpkey(), other._cmpkey())
        except (AttributeError, TypeError):
            return NotImplemented

    def __lt__(self, other):
        return self._compare(other, lambda mine, theirs: mine < theirs)

    def __le__(self, other):
        return self._compare(other, lambda mine, theirs: mine <= theirs)

    def __eq__(self, other):
        return self._compare(other, lambda mine, theirs: mine == theirs)

    def __ge__(self, other):
        return self._compare(other, lambda mine, theirs: mine >= theirs)

    def __gt__(self, other):
        return self._compare(other, lambda mine, theirs: mine > theirs)

    def __ne__(self, other):
        return self._compare(other, lambda mine, theirs: mine != theirs)
|
24
lib/pysrt/compat.py
Normal file
24
lib/pysrt/compat.py
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
|
||||||
|
import sys

# Interpreter version shorthand.
_ver = sys.version_info

#: True when running under Python 2.x.
is_py2 = (_ver[0] == 2)

#: True when running under Python 3.x.
is_py3 = (_ver[0] == 3)

from io import open as io_open

if is_py3:
    # On Python 3 the builtins already behave the way pysrt expects.
    builtin_str = str
    basestring = (str, bytes)
    str = str
    open = open
elif is_py2:
    # On Python 2, normalize to unicode-first equivalents.
    builtin_str = str
    basestring = basestring
    str = unicode
    open = io_open
|
31
lib/pysrt/srtexc.py
Normal file
31
lib/pysrt/srtexc.py
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
"""
|
||||||
|
Exception classes
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class Error(Exception):
|
||||||
|
"""
|
||||||
|
Pysrt's base exception
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidTimeString(Error):
|
||||||
|
"""
|
||||||
|
Raised when parser fail on bad formated time strings
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidItem(Error):
|
||||||
|
"""
|
||||||
|
Raised when parser fail to parse a sub title item
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidIndex(InvalidItem):
|
||||||
|
"""
|
||||||
|
Raised when parser fail to parse a sub title index
|
||||||
|
"""
|
||||||
|
pass
|
312
lib/pysrt/srtfile.py
Normal file
312
lib/pysrt/srtfile.py
Normal file
|
@ -0,0 +1,312 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import codecs
|
||||||
|
|
||||||
|
try:
|
||||||
|
from collections import UserList
|
||||||
|
except ImportError:
|
||||||
|
from UserList import UserList
|
||||||
|
|
||||||
|
from itertools import chain
|
||||||
|
from copy import copy
|
||||||
|
|
||||||
|
from pysrt.srtexc import Error
|
||||||
|
from pysrt.srtitem import SubRipItem
|
||||||
|
from pysrt.compat import str
|
||||||
|
|
||||||
|
# Byte-order marks, ordered longest first so that prefix matching in
# SubRipFile._detect_encoding is unambiguous (UTF-32 BOMs start with the
# UTF-16 ones).
BOMS = (
    (codecs.BOM_UTF32_LE, 'utf_32_le'),
    (codecs.BOM_UTF32_BE, 'utf_32_be'),
    (codecs.BOM_UTF16_LE, 'utf_16_le'),
    (codecs.BOM_UTF16_BE, 'utf_16_be'),
    (codecs.BOM_UTF8, 'utf_8'),
)
# Map each codec name to its BOM decoded as text in that codec.
CODECS_BOMS = {codec: str(bom, codec) for bom, codec in BOMS}
# Length in bytes of the longest BOM above.
BIGGER_BOM = max(len(bom) for bom, _codec in BOMS)
|
||||||
|
|
||||||
|
|
||||||
|
class SubRipFile(UserList, object):
    """
    SubRip file descriptor.

    Provide a pure Python mapping on all metadata.

    SubRipFile(items, eol, path, encoding)

    items -> list of SubRipItem. Default to [].
    eol -> str: end of line character. Default to linesep used in opened file
        if any else to os.linesep.
    path -> str: path where file will be saved. To open an existant file see
        SubRipFile.open.
    encoding -> str: encoding used at file save. Default to utf-8.
    """
    # Parse-error policies accepted by read()/stream(); see _handle_error().
    ERROR_PASS = 0
    ERROR_LOG = 1
    ERROR_RAISE = 2

    # Encoding assumed when no BOM identifies the file (see _detect_encoding).
    DEFAULT_ENCODING = 'utf_8'

    def __init__(self, items=None, eol=None, path=None, encoding='utf-8'):
        UserList.__init__(self, items or [])
        self._eol = eol
        self.path = path
        self.encoding = encoding

    def _get_eol(self):
        # Fall back to the platform separator until an eol has been set.
        return self._eol or os.linesep

    def _set_eol(self, eol):
        # First writer wins: once an eol is known, later assignments are
        # ignored (read() assigns the detected eol through this property).
        self._eol = self._eol or eol

    eol = property(_get_eol, _set_eol)

    def slice(self, starts_before=None, starts_after=None, ends_before=None,
              ends_after=None):
        """
        slice([starts_before][, starts_after][, ends_before][, ends_after]) \
-> SubRipFile clone

        All arguments are optional, and should be coercible to SubRipTime
        object.

        It reduce the set of subtitles to those that match match given time
        constraints.

        The returned set is a clone, but still contains references to original
        subtitles. So if you shift this returned set, subs contained in the
        original SubRipFile instance will be altered too.

        Example:
            >>> subs.slice(ends_after={'seconds': 20}).shift(seconds=2)
        """
        clone = copy(self)

        # Chain lazy generator filters over the shared item references;
        # nothing is evaluated until the list() call below.
        if starts_before:
            clone.data = (i for i in clone.data if i.start < starts_before)
        if starts_after:
            clone.data = (i for i in clone.data if i.start > starts_after)
        if ends_before:
            clone.data = (i for i in clone.data if i.end < ends_before)
        if ends_after:
            clone.data = (i for i in clone.data if i.end > ends_after)

        clone.data = list(clone.data)
        return clone

    def at(self, timestamp=None, **kwargs):
        """
        at(timestamp) -> SubRipFile clone

        timestamp argument should be coercible to SubRipFile object.

        A specialization of slice. Return all subtiles visible at the
        timestamp mark.

        Example:
            >>> subs.at((0, 0, 20, 0)).shift(seconds=2)
            >>> subs.at(seconds=20).shift(seconds=2)
        """
        # Accept either a positional timestamp or keyword components
        # (hours=, seconds=, ...) forwarded as a dict.
        time = timestamp or kwargs
        return self.slice(starts_before=time, ends_after=time)

    def shift(self, *args, **kwargs):
        """shift(hours, minutes, seconds, milliseconds, ratio)

        Shift `start` and `end` attributes of each items of file either by
        applying a ratio or by adding an offset.

        `ratio` should be either an int or a float.
        Example to convert subtitles from 23.9 fps to 25 fps:
            >>> subs.shift(ratio=25/23.9)

        All "time" arguments are optional and have a default value of 0.
        Example to delay all subs from 2 seconds and half
            >>> subs.shift(seconds=2, milliseconds=500)
        """
        for item in self:
            item.shift(*args, **kwargs)

    def clean_indexes(self):
        """
        clean_indexes()

        Sort subs and reset their index attribute. Should be called after
        destructive operations like split or such.
        """
        self.sort()
        # Re-number sequentially starting at 1, as the SRT format expects.
        for index, item in enumerate(self):
            item.index = index + 1

    @property
    def text(self):
        # Concatenated text of every subtitle, one item per line group.
        return '\n'.join(i.text for i in self)

    @classmethod
    def open(cls, path='', encoding=None, error_handling=ERROR_PASS):
        """
        open([path, [encoding]])

        If you do not provide any encoding, it can be detected if the file
        contain a bit order mark, unless it is set to utf-8 as default.
        """
        new_file = cls(path=path, encoding=encoding)
        source_file = cls._open_unicode_file(path, claimed_encoding=encoding)
        new_file.read(source_file, error_handling=error_handling)
        source_file.close()
        return new_file

    @classmethod
    def from_string(cls, source, **kwargs):
        """
        from_string(source, **kwargs) -> SubRipFile

        `source` -> a unicode instance or at least a str instance encoded with
        `sys.getdefaultencoding()`
        """
        error_handling = kwargs.pop('error_handling', None)
        new_file = cls(**kwargs)
        # splitlines(True) keeps the eol characters so _guess_eol works.
        new_file.read(source.splitlines(True), error_handling=error_handling)
        return new_file

    def read(self, source_file, error_handling=ERROR_PASS):
        """
        read(source_file, [error_handling])

        This method parse subtitles contained in `source_file` and append them
        to the current instance.

        `source_file` -> Any iterable that yield unicode strings, like a file
            opened with `codecs.open()` or an array of unicode.
        """
        # Record the detected eol through the first-writer-wins property.
        self.eol = self._guess_eol(source_file)
        self.extend(self.stream(source_file, error_handling=error_handling))
        return self

    @classmethod
    def stream(cls, source_file, error_handling=ERROR_PASS):
        """
        stream(source_file, [error_handling])

        This method yield SubRipItem instances a soon as they have been parsed
        without storing them. It is a kind of SAX parser for .srt files.

        `source_file` -> Any iterable that yield unicode strings, like a file
            opened with `codecs.open()` or an array of unicode.

        Example:
            >>> import pysrt
            >>> import codecs
            >>> file = codecs.open('movie.srt', encoding='utf-8')
            >>> for sub in pysrt.stream(file):
            ...     sub.text += "\nHello !"
            ...     print unicode(sub)
        """
        string_buffer = []
        # Append a trailing '\n' sentinel so the final buffered item is
        # flushed even when the file lacks a terminating blank line.
        for index, line in enumerate(chain(source_file, '\n')):
            if line.strip():
                string_buffer.append(line)
            else:
                # Blank line: the buffered lines form one subtitle block.
                source = string_buffer
                string_buffer = []
                # all(source) rejects blocks containing empty strings.
                if source and all(source):
                    try:
                        yield SubRipItem.from_lines(source)
                    except Error as error:
                        # Attach the offending raw block for diagnostics.
                        error.args += (''.join(source), )
                        cls._handle_error(error, error_handling, index)

    def save(self, path=None, encoding=None, eol=None):
        """
        save([path][, encoding][, eol])

        Use initial path if no other provided.
        Use initial encoding if no other provided.
        Use initial eol if no other provided.
        """
        path = path or self.path
        encoding = encoding or self.encoding

        save_file = codecs.open(path, 'w+', encoding=encoding)
        self.write_into(save_file, eol=eol)
        save_file.close()

    def write_into(self, output_file, eol=None):
        """
        write_into(output_file [, eol])

        Serialize current state into `output_file`.

        `output_file` -> Any instance that respond to `write()`, typically a
            file object
        """
        output_eol = eol or self.eol

        for item in self:
            string_repr = str(item)
            # Items serialize with '\n'; translate to the target eol.
            if output_eol != '\n':
                string_repr = string_repr.replace('\n', output_eol)
            output_file.write(string_repr)
            # Only add trailing eol if it's not already present.
            # It was kept in the SubRipItem's text before but it really
            # belongs here. Existing applications might give us subtitles
            # which already contain a trailing eol though.
            if not string_repr.endswith(2 * output_eol):
                output_file.write(output_eol)

    @classmethod
    def _guess_eol(cls, string_iterable):
        # Infer the line separator from the first line; longest match first
        # so '\r\n' is not mistaken for '\n'.
        first_line = cls._get_first_line(string_iterable)
        for eol in ('\r\n', '\r', '\n'):
            if first_line.endswith(eol):
                return eol
        return os.linesep

    @classmethod
    def _get_first_line(cls, string_iterable):
        # Peek at the first line, rewinding afterwards when the iterable is
        # a seekable file so the caller still sees the whole content.
        if hasattr(string_iterable, 'tell'):
            previous_position = string_iterable.tell()

        try:
            first_line = next(iter(string_iterable))
        except StopIteration:
            return ''
        if hasattr(string_iterable, 'seek'):
            string_iterable.seek(previous_position)

        return first_line

    @classmethod
    def _detect_encoding(cls, path):
        # Sniff the leading bytes for a known BOM; default to utf-8.
        file_descriptor = open(path, 'rb')
        first_chars = file_descriptor.read(BIGGER_BOM)
        file_descriptor.close()

        for bom, encoding in BOMS:
            if first_chars.startswith(bom):
                return encoding

        # TODO: maybe a chardet integration
        return cls.DEFAULT_ENCODING

    @classmethod
    def _open_unicode_file(cls, path, claimed_encoding=None):
        encoding = claimed_encoding or cls._detect_encoding(path)
        # NOTE(review): the 'U' open mode was deprecated and removed in
        # Python 3.11 (codecs.open raises ValueError) — confirm target
        # Python versions.
        source_file = codecs.open(path, 'rU', encoding=encoding)

        # get rid of BOM if any
        possible_bom = CODECS_BOMS.get(encoding, None)
        if possible_bom:
            file_bom = source_file.read(len(possible_bom))
            if not file_bom == possible_bom:
                source_file.seek(0)  # if not rewind
        return source_file

    @classmethod
    def _handle_error(cls, error, error_handling, index):
        # ERROR_PASS (the default) falls through silently; ERROR_RAISE
        # re-raises with the line index prepended; ERROR_LOG reports on
        # stderr and continues.
        if error_handling == cls.ERROR_RAISE:
            error.args = (index, ) + error.args
            raise error
        if error_handling == cls.ERROR_LOG:
            name = type(error).__name__
            sys.stderr.write('PySRT-%s(line %s): \n' % (name, index))
            # NOTE(review): encode() produces bytes; writing bytes to the
            # text-mode sys.stderr raises TypeError on Python 3 — confirm.
            sys.stderr.write(error.args[0].encode('ascii', 'replace'))
            sys.stderr.write('\n')
|
76
lib/pysrt/srtitem.py
Normal file
76
lib/pysrt/srtitem.py
Normal file
|
@ -0,0 +1,76 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
SubRip's subtitle parser
|
||||||
|
"""
|
||||||
|
from pysrt.srtexc import InvalidItem, InvalidIndex
|
||||||
|
from pysrt.srttime import SubRipTime
|
||||||
|
from pysrt.comparablemixin import ComparableMixin
|
||||||
|
from pysrt.compat import str
|
||||||
|
|
||||||
|
class SubRipItem(ComparableMixin):
    """
    SubRipItem(index, start, end, text, position)

    index -> int: index of item in file. 0 by default.
    start, end -> SubRipTime or coercible.
    text -> unicode: text content for item.
    position -> unicode: raw srt/vtt "display coordinates" string
    """
    # Serialization template: index, "start --> end[ position]", text, blank.
    ITEM_PATTERN = '%s\n%s --> %s%s\n%s\n'
    TIMESTAMP_SEPARATOR = '-->'

    def __init__(self, index=0, start=None, end=None, text='', position=''):
        try:
            self.index = int(index)
        except (TypeError, ValueError):  # try to cast as int, but it's not mandatory
            self.index = index

        self.start = SubRipTime.coerce(start or 0)
        self.end = SubRipTime.coerce(end or 0)
        self.position = str(position)
        self.text = str(text)

    def __str__(self):
        # Only emit the position field when it carries non-blank content.
        position = ' %s' % self.position if self.position.strip() else ''
        return self.ITEM_PATTERN % (self.index, self.start, self.end,
                                    position, self.text)

    def _cmpkey(self):
        # ComparableMixin hook: items order by start time, then end time.
        return (self.start, self.end)

    def shift(self, *args, **kwargs):
        """
        shift(hours, minutes, seconds, milliseconds, ratio)

        Add given values to start and end attributes.
        All arguments are optional and have a default value of 0.
        """
        self.start.shift(*args, **kwargs)
        self.end.shift(*args, **kwargs)

    @classmethod
    def from_string(cls, source):
        # Convenience wrapper: keep eols so from_lines sees raw lines.
        return cls.from_lines(source.splitlines(True))

    @classmethod
    def from_lines(cls, lines):
        # Parse one subtitle block; the leading index line is optional
        # (WebVTT-style blocks omit it). Raises InvalidItem on bad input.
        if len(lines) < 2:
            raise InvalidItem()
        lines = [l.rstrip() for l in lines]
        index = None
        if cls.TIMESTAMP_SEPARATOR not in lines[0]:
            index = lines.pop(0)
        start, end, position = cls.split_timestamps(lines[0])
        body = '\n'.join(lines[1:])
        return cls(index, start, end, body, position)

    @classmethod
    def split_timestamps(cls, line):
        # Split 'start --> end [position]' into three stripped strings;
        # raises InvalidItem unless exactly one separator is present.
        timestamps = line.split(cls.TIMESTAMP_SEPARATOR)
        if len(timestamps) != 2:
            raise InvalidItem()
        start, end_and_position = timestamps
        # Anything after the first space following the end time is the
        # optional display-coordinates blob.
        end_and_position = end_and_position.lstrip().split(' ', 1)
        end = end_and_position[0]
        position = end_and_position[1] if len(end_and_position) > 1 else ''
        return (s.strip() for s in (start, end, position))
|
176
lib/pysrt/srttime.py
Normal file
176
lib/pysrt/srttime.py
Normal file
|
@ -0,0 +1,176 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
SubRip's time format parser: HH:MM:SS,mmm
|
||||||
|
"""
|
||||||
|
import re
|
||||||
|
from datetime import time
|
||||||
|
|
||||||
|
from pysrt.srtexc import InvalidTimeString
|
||||||
|
from pysrt.comparablemixin import ComparableMixin
|
||||||
|
from pysrt.compat import str, basestring
|
||||||
|
|
||||||
|
class TimeItemDescriptor(object):
    """Data descriptor exposing one time unit of an instance's ``ordinal``.

    The owner instance stores a single ``ordinal`` value; this descriptor
    converts reads and writes for one unit of size ``ratio``, optionally
    wrapping at ``super_ratio`` (e.g. minutes wrap at one hour).
    """
    # pylint: disable-msg=R0903

    def __init__(self, ratio, super_ratio=0):
        self.ratio = int(ratio)
        self.super_ratio = int(super_ratio)

    def _get_ordinal(self, instance):
        # Reduce modulo the enclosing unit so e.g. minutes stay below 60.
        ordinal = instance.ordinal
        if self.super_ratio:
            ordinal %= self.super_ratio
        return ordinal

    def __get__(self, instance, klass):
        if instance is None:
            # Class-level access has no ordinal to read.
            raise AttributeError
        return self._get_ordinal(instance) // self.ratio

    def __set__(self, instance, value):
        # Remove this unit's current contribution, then add the new one.
        current = self._get_ordinal(instance) - instance.ordinal % self.ratio
        instance.ordinal += value * self.ratio - current
|
||||||
|
|
||||||
|
|
||||||
|
class SubRipTime(ComparableMixin):
|
||||||
|
TIME_PATTERN = '%02d:%02d:%02d,%03d'
|
||||||
|
TIME_REPR = 'SubRipTime(%d, %d, %d, %d)'
|
||||||
|
RE_TIME_SEP = re.compile(r'\:|\.|\,')
|
||||||
|
RE_INTEGER = re.compile(r'^(\d+)')
|
||||||
|
SECONDS_RATIO = 1000
|
||||||
|
MINUTES_RATIO = SECONDS_RATIO * 60
|
||||||
|
HOURS_RATIO = MINUTES_RATIO * 60
|
||||||
|
|
||||||
|
hours = TimeItemDescriptor(HOURS_RATIO)
|
||||||
|
minutes = TimeItemDescriptor(MINUTES_RATIO, HOURS_RATIO)
|
||||||
|
seconds = TimeItemDescriptor(SECONDS_RATIO, MINUTES_RATIO)
|
||||||
|
milliseconds = TimeItemDescriptor(1, SECONDS_RATIO)
|
||||||
|
|
||||||
|
def __init__(self, hours=0, minutes=0, seconds=0, milliseconds=0):
|
||||||
|
"""
|
||||||
|
SubRipTime(hours, minutes, seconds, milliseconds)
|
||||||
|
|
||||||
|
All arguments are optional and have a default value of 0.
|
||||||
|
"""
|
||||||
|
super(SubRipTime, self).__init__()
|
||||||
|
self.ordinal = hours * self.HOURS_RATIO \
|
||||||
|
+ minutes * self.MINUTES_RATIO \
|
||||||
|
+ seconds * self.SECONDS_RATIO \
|
||||||
|
+ milliseconds
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return self.TIME_REPR % tuple(self)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
if self.ordinal < 0:
|
||||||
|
# Represent negative times as zero
|
||||||
|
return str(SubRipTime.from_ordinal(0))
|
||||||
|
return self.TIME_PATTERN % tuple(self)
|
||||||
|
|
||||||
|
def _compare(self, other, method):
|
||||||
|
return super(SubRipTime, self)._compare(self.coerce(other), method)
|
||||||
|
|
||||||
|
def _cmpkey(self):
|
||||||
|
return self.ordinal
|
||||||
|
|
||||||
|
def __add__(self, other):
|
||||||
|
return self.from_ordinal(self.ordinal + self.coerce(other).ordinal)
|
||||||
|
|
||||||
|
def __iadd__(self, other):
|
||||||
|
self.ordinal += self.coerce(other).ordinal
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __sub__(self, other):
|
||||||
|
return self.from_ordinal(self.ordinal - self.coerce(other).ordinal)
|
||||||
|
|
||||||
|
def __isub__(self, other):
|
||||||
|
self.ordinal -= self.coerce(other).ordinal
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __mul__(self, ratio):
|
||||||
|
return self.from_ordinal(int(round(self.ordinal * ratio)))
|
||||||
|
|
||||||
|
def __imul__(self, ratio):
|
||||||
|
self.ordinal = int(round(self.ordinal * ratio))
|
||||||
|
return self
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def coerce(cls, other):
|
||||||
|
"""
|
||||||
|
Coerce many types to SubRipTime instance.
|
||||||
|
Supported types:
|
||||||
|
- str/unicode
|
||||||
|
- int/long
|
||||||
|
- datetime.time
|
||||||
|
- any iterable
|
||||||
|
- dict
|
||||||
|
"""
|
||||||
|
if isinstance(other, SubRipTime):
|
||||||
|
return other
|
||||||
|
if isinstance(other, basestring):
|
||||||
|
return cls.from_string(other)
|
||||||
|
if isinstance(other, int):
|
||||||
|
return cls.from_ordinal(other)
|
||||||
|
if isinstance(other, time):
|
||||||
|
return cls.from_time(other)
|
||||||
|
try:
|
||||||
|
return cls(**other)
|
||||||
|
except TypeError:
|
||||||
|
return cls(*other)
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
yield self.hours
|
||||||
|
yield self.minutes
|
||||||
|
yield self.seconds
|
||||||
|
yield self.milliseconds
|
||||||
|
|
||||||
|
def shift(self, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
shift(hours, minutes, seconds, milliseconds)
|
||||||
|
|
||||||
|
All arguments are optional and have a default value of 0.
|
||||||
|
"""
|
||||||
|
if 'ratio' in kwargs:
|
||||||
|
self *= kwargs.pop('ratio')
|
||||||
|
self += self.__class__(*args, **kwargs)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_ordinal(cls, ordinal):
|
||||||
|
"""
|
||||||
|
int -> SubRipTime corresponding to a total count of milliseconds
|
||||||
|
"""
|
||||||
|
return cls(milliseconds=int(ordinal))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_string(cls, source):
|
||||||
|
"""
|
||||||
|
str/unicode(HH:MM:SS,mmm) -> SubRipTime corresponding to serial
|
||||||
|
raise InvalidTimeString
|
||||||
|
"""
|
||||||
|
items = cls.RE_TIME_SEP.split(source)
|
||||||
|
if len(items) != 4:
|
||||||
|
raise InvalidTimeString
|
||||||
|
return cls(*(cls.parse_int(i) for i in items))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def parse_int(cls, digits):
|
||||||
|
try:
|
||||||
|
return int(digits)
|
||||||
|
except ValueError:
|
||||||
|
match = cls.RE_INTEGER.match(digits)
|
||||||
|
if match:
|
||||||
|
return int(match.group())
|
||||||
|
return 0
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_time(cls, source):
|
||||||
|
"""
|
||||||
|
datetime.time -> SubRipTime corresponding to time object
|
||||||
|
"""
|
||||||
|
return cls(hours=source.hour, minutes=source.minute,
|
||||||
|
seconds=source.second, milliseconds=source.microsecond // 1000)
|
||||||
|
|
||||||
|
def to_time(self):
|
||||||
|
"""
|
||||||
|
Convert SubRipTime instance into a pure datetime.time object
|
||||||
|
"""
|
||||||
|
return time(self.hours, self.minutes, self.seconds,
|
||||||
|
self.milliseconds * 1000)
|
2
lib/pysrt/version.py
Normal file
2
lib/pysrt/version.py
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
VERSION = (1, 0, 1)
|
||||||
|
VERSION_STRING = '.'.join(str(i) for i in VERSION)
|
|
@ -31,4 +31,4 @@ except ImportError:
|
||||||
|
|
||||||
__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE',
|
__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE',
|
||||||
'MATCHING_CONFIDENCE', 'list_subtitles', 'download_subtitles', 'Pool']
|
'MATCHING_CONFIDENCE', 'list_subtitles', 'download_subtitles', 'Pool']
|
||||||
logging.getLogger("subliminal").addHandler(NullHandler())
|
logging.getLogger(__name__).addHandler(NullHandler())
|
||||||
|
|
|
@ -23,7 +23,7 @@ import logging
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['list_subtitles', 'download_subtitles']
|
__all__ = ['list_subtitles', 'download_subtitles']
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def list_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None):
|
def list_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None):
|
||||||
|
@ -94,10 +94,7 @@ def download_subtitles(paths, languages=None, services=None, force=True, multi=F
|
||||||
order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]
|
order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]
|
||||||
subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
|
subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
|
||||||
for video, subtitles in subtitles_by_video.iteritems():
|
for video, subtitles in subtitles_by_video.iteritems():
|
||||||
try:
|
subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
|
||||||
subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
|
|
||||||
except StopIteration:
|
|
||||||
break
|
|
||||||
results = []
|
results = []
|
||||||
service_instances = {}
|
service_instances = {}
|
||||||
tasks = create_download_tasks(subtitles_by_video, languages, multi)
|
tasks = create_download_tasks(subtitles_by_video, languages, multi)
|
||||||
|
|
|
@ -26,7 +26,7 @@ import threading
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['Worker', 'Pool']
|
__all__ = ['Worker', 'Pool']
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Worker(threading.Thread):
|
class Worker(threading.Thread):
|
||||||
|
|
|
@ -27,7 +27,7 @@ except ImportError:
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['Cache', 'cachedmethod']
|
__all__ = ['Cache', 'cachedmethod']
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Cache(object):
|
class Cache(object):
|
||||||
|
|
|
@ -31,8 +31,8 @@ import logging
|
||||||
__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE', 'MATCHING_CONFIDENCE',
|
__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE', 'MATCHING_CONFIDENCE',
|
||||||
'create_list_tasks', 'create_download_tasks', 'consume_task', 'matching_confidence',
|
'create_list_tasks', 'create_download_tasks', 'consume_task', 'matching_confidence',
|
||||||
'key_subtitles', 'group_by_video']
|
'key_subtitles', 'group_by_video']
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
SERVICES = ['opensubtitles', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles', 'itasa', 'usub']
|
SERVICES = ['opensubtitles', 'bierdopje', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles']
|
||||||
LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE = range(4)
|
LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE = range(4)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -15,4 +15,4 @@
|
||||||
#
|
#
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||||
__version__ = '0.6.3'
|
__version__ = '0.6.2'
|
||||||
|
|
|
@ -20,7 +20,7 @@ import re
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
COUNTRIES = [('AF', 'AFG', '004', u'Afghanistan'),
|
COUNTRIES = [('AF', 'AFG', '004', u'Afghanistan'),
|
||||||
|
@ -619,7 +619,6 @@ LANGUAGES = [('aar', '', 'aa', u'Afar', u'afar'),
|
||||||
('pli', '', 'pi', u'Pali', u'pali'),
|
('pli', '', 'pi', u'Pali', u'pali'),
|
||||||
('pol', '', 'pl', u'Polish', u'polonais'),
|
('pol', '', 'pl', u'Polish', u'polonais'),
|
||||||
('pon', '', '', u'Pohnpeian', u'pohnpei'),
|
('pon', '', '', u'Pohnpeian', u'pohnpei'),
|
||||||
('pob', '', 'pb', u'Brazilian Portuguese', u'brazilian portuguese'),
|
|
||||||
('por', '', 'pt', u'Portuguese', u'portugais'),
|
('por', '', 'pt', u'Portuguese', u'portugais'),
|
||||||
('pra', '', '', u'Prakrit languages', u'prâkrit, langues'),
|
('pra', '', '', u'Prakrit languages', u'prâkrit, langues'),
|
||||||
('pro', '', '', u'Provençal, Old (to 1500)', u'provençal ancien (jusqu\'à 1500)'),
|
('pro', '', '', u'Provençal, Old (to 1500)', u'provençal ancien (jusqu\'à 1500)'),
|
||||||
|
|
|
@ -27,7 +27,7 @@ import zipfile
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['ServiceBase', 'ServiceConfig']
|
__all__ = ['ServiceBase', 'ServiceConfig']
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class ServiceBase(object):
|
class ServiceBase(object):
|
||||||
|
@ -82,7 +82,7 @@ class ServiceBase(object):
|
||||||
"""Initialize connection"""
|
"""Initialize connection"""
|
||||||
logger.debug(u'Initializing %s' % self.__class__.__name__)
|
logger.debug(u'Initializing %s' % self.__class__.__name__)
|
||||||
self.session = requests.session()
|
self.session = requests.session()
|
||||||
self.session.headers.update({'User-Agent': self.user_agent})
|
self.session.headers.update({'User-Agent': self.user_agent})
|
||||||
|
|
||||||
def init_cache(self):
|
def init_cache(self):
|
||||||
"""Initialize cache, make sure it is loaded from disk"""
|
"""Initialize cache, make sure it is loaded from disk"""
|
||||||
|
@ -220,16 +220,14 @@ class ServiceBase(object):
|
||||||
# TODO: could check if maybe we already have a text file and
|
# TODO: could check if maybe we already have a text file and
|
||||||
# download it directly
|
# download it directly
|
||||||
raise DownloadFailedError('Downloaded file is not a zip file')
|
raise DownloadFailedError('Downloaded file is not a zip file')
|
||||||
zipsub = zipfile.ZipFile(zippath)
|
with zipfile.ZipFile(zippath) as zipsub:
|
||||||
for subfile in zipsub.namelist():
|
for subfile in zipsub.namelist():
|
||||||
if os.path.splitext(subfile)[1] in EXTENSIONS:
|
if os.path.splitext(subfile)[1] in EXTENSIONS:
|
||||||
with open(filepath, 'wb') as f:
|
with open(filepath, 'w') as f:
|
||||||
f.write(zipsub.open(subfile).read())
|
f.write(zipsub.open(subfile).read())
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
zipsub.close()
|
raise DownloadFailedError('No subtitles found in zip file')
|
||||||
raise DownloadFailedError('No subtitles found in zip file')
|
|
||||||
zipsub.close()
|
|
||||||
os.remove(zippath)
|
os.remove(zippath)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(u'Download %s failed: %s' % (url, e))
|
logger.error(u'Download %s failed: %s' % (url, e))
|
||||||
|
|
|
@ -29,17 +29,16 @@ import os
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Addic7ed(ServiceBase):
|
class Addic7ed(ServiceBase):
|
||||||
server_url = 'http://www.addic7ed.com'
|
server_url = 'http://www.addic7ed.com'
|
||||||
site_url = 'http://www.addic7ed.com'
|
|
||||||
api_based = False
|
api_based = False
|
||||||
#TODO: Complete this
|
#TODO: Complete this
|
||||||
languages = language_set(['ar', 'ca', 'de', 'el', 'en', 'es', 'eu', 'fr', 'ga', 'gl', 'he', 'hr', 'hu',
|
languages = language_set(['ar', 'ca', 'de', 'el', 'en', 'es', 'eu', 'fr', 'ga', 'gl', 'he', 'hr', 'hu',
|
||||||
'it', 'pl', 'pt', 'ro', 'ru', 'se', 'pb'])
|
'it', 'pl', 'pt', 'ro', 'ru', 'se', 'pt-br'])
|
||||||
language_map = {'Portuguese (Brazilian)': Language('pob'), 'Greek': Language('gre'),
|
language_map = {'Portuguese (Brazilian)': Language('por-BR'), 'Greek': Language('gre'),
|
||||||
'Spanish (Latin America)': Language('spa'), 'Galego': Language('glg'),
|
'Spanish (Latin America)': Language('spa'), 'Galego': Language('glg'),
|
||||||
u'Català': Language('cat')}
|
u'Català': Language('cat')}
|
||||||
videos = [Episode]
|
videos = [Episode]
|
||||||
|
@ -64,7 +63,6 @@ class Addic7ed(ServiceBase):
|
||||||
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
|
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
|
||||||
|
|
||||||
def query(self, filepath, languages, keywords, series, season, episode):
|
def query(self, filepath, languages, keywords, series, season, episode):
|
||||||
|
|
||||||
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
|
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
|
||||||
self.init_cache()
|
self.init_cache()
|
||||||
try:
|
try:
|
||||||
|
@ -92,7 +90,7 @@ class Addic7ed(ServiceBase):
|
||||||
continue
|
continue
|
||||||
sub_keywords = split_keyword(cells[4].text.strip().lower())
|
sub_keywords = split_keyword(cells[4].text.strip().lower())
|
||||||
#TODO: Maybe allow empty keywords here? (same in Subtitulos)
|
#TODO: Maybe allow empty keywords here? (same in Subtitulos)
|
||||||
if keywords and not keywords & sub_keywords:
|
if not keywords & sub_keywords:
|
||||||
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
|
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
|
||||||
continue
|
continue
|
||||||
sub_link = '%s/%s' % (self.server_url, cells[9].a['href'])
|
sub_link = '%s/%s' % (self.server_url, cells[9].a['href'])
|
||||||
|
|
|
@ -31,12 +31,11 @@ except ImportError:
|
||||||
import pickle
|
import pickle
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class BierDopje(ServiceBase):
|
class BierDopje(ServiceBase):
|
||||||
server_url = 'http://api.bierdopje.com/A2B638AC5D804C2E/'
|
server_url = 'http://api.bierdopje.com/A2B638AC5D804C2E/'
|
||||||
site_url = 'http://www.bierdopje.com'
|
|
||||||
user_agent = 'Subliminal/0.6'
|
user_agent = 'Subliminal/0.6'
|
||||||
api_based = True
|
api_based = True
|
||||||
languages = language_set(['eng', 'dut'])
|
languages = language_set(['eng', 'dut'])
|
||||||
|
|
|
@ -1,216 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright 2012 Mr_Orange <mr_orange@hotmail.it>
|
|
||||||
#
|
|
||||||
# This file is part of subliminal.
|
|
||||||
#
|
|
||||||
# subliminal is free software; you can redistribute it and/or modify it under
|
|
||||||
# the terms of the GNU Lesser General Public License as published by
|
|
||||||
# the Free Software Foundation; either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# subliminal is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
from . import ServiceBase
|
|
||||||
from ..exceptions import DownloadFailedError, ServiceError
|
|
||||||
from ..cache import cachedmethod
|
|
||||||
from ..language import language_set, Language
|
|
||||||
from ..subtitles import get_subtitle_path, ResultSubtitle, EXTENSIONS
|
|
||||||
from ..utils import get_keywords
|
|
||||||
from ..videos import Episode
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
import logging
|
|
||||||
import re
|
|
||||||
import os
|
|
||||||
import requests
|
|
||||||
import zipfile
|
|
||||||
import StringIO
|
|
||||||
import guessit
|
|
||||||
|
|
||||||
from sickbeard.common import Quality
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
|
||||||
|
|
||||||
|
|
||||||
class Itasa(ServiceBase):
|
|
||||||
server_url = 'http://www.italiansubs.net/'
|
|
||||||
site_url = 'http://www.italiansubs.net/'
|
|
||||||
api_based = False
|
|
||||||
languages = language_set(['it'])
|
|
||||||
videos = [Episode]
|
|
||||||
require_video = False
|
|
||||||
required_features = ['permissive']
|
|
||||||
quality_dict = {Quality.SDTV : '',
|
|
||||||
Quality.SDDVD : 'dvdrip',
|
|
||||||
Quality.RAWHDTV : '1080i',
|
|
||||||
Quality.HDTV : '720p',
|
|
||||||
Quality.FULLHDTV : ('1080p','720p'),
|
|
||||||
Quality.HDWEBDL : 'web-dl',
|
|
||||||
Quality.FULLHDWEBDL : 'web-dl',
|
|
||||||
Quality.HDBLURAY : ('bdrip', 'bluray'),
|
|
||||||
Quality.FULLHDBLURAY : ('bdrip', 'bluray'),
|
|
||||||
Quality.UNKNOWN : 'unknown' #Any subtitle will be downloaded
|
|
||||||
}
|
|
||||||
|
|
||||||
def init(self):
|
|
||||||
|
|
||||||
super(Itasa, self).init()
|
|
||||||
login_pattern = '<input type="hidden" name="return" value="([^\n\r\t ]+?)" /><input type="hidden" name="([^\n\r\t ]+?)" value="([^\n\r\t ]+?)" />'
|
|
||||||
|
|
||||||
response = requests.get(self.server_url + 'index.php')
|
|
||||||
if response.status_code != 200:
|
|
||||||
raise ServiceError('Initiate failed')
|
|
||||||
|
|
||||||
match = re.search(login_pattern, response.content, re.IGNORECASE | re.DOTALL)
|
|
||||||
if not match:
|
|
||||||
raise ServiceError('Can not find unique id parameter on page')
|
|
||||||
|
|
||||||
login_parameter = {'username': 'sickbeard',
|
|
||||||
'passwd': 'subliminal',
|
|
||||||
'remember': 'yes',
|
|
||||||
'Submit': 'Login',
|
|
||||||
'remember': 'yes',
|
|
||||||
'option': 'com_user',
|
|
||||||
'task': 'login',
|
|
||||||
'silent': 'true',
|
|
||||||
'return': match.group(1),
|
|
||||||
match.group(2): match.group(3)
|
|
||||||
}
|
|
||||||
|
|
||||||
self.session = requests.session()
|
|
||||||
r = self.session.post(self.server_url + 'index.php', data=login_parameter)
|
|
||||||
if not re.search('logouticon.png', r.content, re.IGNORECASE | re.DOTALL):
|
|
||||||
raise ServiceError('Itasa Login Failed')
|
|
||||||
|
|
||||||
@cachedmethod
|
|
||||||
def get_series_id(self, name):
|
|
||||||
"""Get the show page and cache every show found in it"""
|
|
||||||
r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=9')
|
|
||||||
soup = BeautifulSoup(r.content, self.required_features)
|
|
||||||
all_series = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
|
|
||||||
for tv_series in all_series.find_all(href=re.compile('func=select')):
|
|
||||||
series_name = tv_series.text.lower().strip().replace(':','')
|
|
||||||
match = re.search('&id=([0-9]+)', tv_series['href'])
|
|
||||||
if match is None:
|
|
||||||
continue
|
|
||||||
series_id = int(match.group(1))
|
|
||||||
self.cache_for(self.get_series_id, args=(series_name,), result=series_id)
|
|
||||||
return self.cached_value(self.get_series_id, args=(name,))
|
|
||||||
|
|
||||||
def get_episode_id(self, series, series_id, season, episode, quality):
|
|
||||||
"""Get the id subtitle for episode with the given quality"""
|
|
||||||
|
|
||||||
season_link = None
|
|
||||||
quality_link = None
|
|
||||||
episode_id = None
|
|
||||||
|
|
||||||
r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=select&id=' + str(series_id))
|
|
||||||
soup = BeautifulSoup(r.content, self.required_features)
|
|
||||||
all_seasons = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
|
|
||||||
for seasons in all_seasons.find_all(href=re.compile('func=select')):
|
|
||||||
if seasons.text.lower().strip() == 'stagione %s' % str(season):
|
|
||||||
season_link = seasons['href']
|
|
||||||
break
|
|
||||||
|
|
||||||
if not season_link:
|
|
||||||
logger.debug(u'Could not find season %s for series %s' % (series, str(season)))
|
|
||||||
return None
|
|
||||||
|
|
||||||
r = self.session.get(season_link)
|
|
||||||
soup = BeautifulSoup(r.content, self.required_features)
|
|
||||||
|
|
||||||
all_qualities = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
|
|
||||||
for qualities in all_qualities.find_all(href=re.compile('func=select')):
|
|
||||||
if qualities.text.lower().strip() in self.quality_dict[quality]:
|
|
||||||
quality_link = qualities['href']
|
|
||||||
r = self.session.get(qualities['href'])
|
|
||||||
soup = BeautifulSoup(r.content, self.required_features)
|
|
||||||
break
|
|
||||||
|
|
||||||
#If we want SDTV we are just on the right page so quality link will be None
|
|
||||||
if not quality == Quality.SDTV and not quality_link:
|
|
||||||
logger.debug(u'Could not find a subtitle with required quality for series %s season %s' % (series, str(season)))
|
|
||||||
return None
|
|
||||||
|
|
||||||
all_episodes = soup.find('div', attrs = {'id' : 'remositoryfilelisting'})
|
|
||||||
for episodes in all_episodes.find_all(href=re.compile('func=fileinfo')):
|
|
||||||
ep_string = "%(seasonnumber)dx%(episodenumber)02d" % {'seasonnumber': season, 'episodenumber': episode}
|
|
||||||
if re.search(ep_string, episodes.text, re.I) or re.search('completa$', episodes.text, re.I):
|
|
||||||
match = re.search('&id=([0-9]+)', episodes['href'])
|
|
||||||
if match:
|
|
||||||
episode_id = match.group(1)
|
|
||||||
return episode_id
|
|
||||||
|
|
||||||
return episode_id
|
|
||||||
|
|
||||||
def list_checked(self, video, languages):
|
|
||||||
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
|
|
||||||
|
|
||||||
def query(self, filepath, languages, keywords, series, season, episode):
|
|
||||||
|
|
||||||
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
|
|
||||||
self.init_cache()
|
|
||||||
try:
|
|
||||||
series = series.lower().replace('(','').replace(')','')
|
|
||||||
series_id = self.get_series_id(series)
|
|
||||||
except KeyError:
|
|
||||||
logger.debug(u'Could not find series id for %s' % series)
|
|
||||||
return []
|
|
||||||
|
|
||||||
episode_id = self.get_episode_id(series, series_id, season, episode, Quality.nameQuality(filepath))
|
|
||||||
if not episode_id:
|
|
||||||
logger.debug(u'Could not find subtitle for series %s' % series)
|
|
||||||
return []
|
|
||||||
|
|
||||||
r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=fileinfo&id=' + episode_id)
|
|
||||||
soup = BeautifulSoup(r.content)
|
|
||||||
|
|
||||||
sub_link = soup.find('div', attrs = {'id' : 'remositoryfileinfo'}).find(href=re.compile('func=download'))['href']
|
|
||||||
sub_language = self.get_language('it')
|
|
||||||
path = get_subtitle_path(filepath, sub_language, self.config.multi)
|
|
||||||
subtitle = ResultSubtitle(path, sub_language, self.__class__.__name__.lower(), sub_link)
|
|
||||||
|
|
||||||
return [subtitle]
|
|
||||||
|
|
||||||
def download(self, subtitle):
|
|
||||||
|
|
||||||
logger.info(u'Downloading %s in %s' % (subtitle.link, subtitle.path))
|
|
||||||
try:
|
|
||||||
r = self.session.get(subtitle.link, headers={'Referer': self.server_url, 'User-Agent': self.user_agent})
|
|
||||||
zipcontent = StringIO.StringIO(r.content)
|
|
||||||
zipsub = zipfile.ZipFile(zipcontent)
|
|
||||||
|
|
||||||
# if not zipsub.is_zipfile(zipcontent):
|
|
||||||
# raise DownloadFailedError('Downloaded file is not a zip file')
|
|
||||||
|
|
||||||
subfile = ''
|
|
||||||
if len(zipsub.namelist()) == 1:
|
|
||||||
subfile = zipsub.namelist()[0]
|
|
||||||
else:
|
|
||||||
#Season Zip Retrive Season and episode Numbers from path
|
|
||||||
guess = guessit.guess_file_info(subtitle.path, 'episode')
|
|
||||||
ep_string = "s%(seasonnumber)02de%(episodenumber)02d" % {'seasonnumber': guess['season'], 'episodenumber': guess['episodeNumber']}
|
|
||||||
for file in zipsub.namelist():
|
|
||||||
if re.search(ep_string, file, re.I):
|
|
||||||
subfile = file
|
|
||||||
break
|
|
||||||
if os.path.splitext(subfile)[1] in EXTENSIONS:
|
|
||||||
with open(subtitle.path, 'wb') as f:
|
|
||||||
f.write(zipsub.open(subfile).read())
|
|
||||||
else:
|
|
||||||
zipsub.close()
|
|
||||||
raise DownloadFailedError('No subtitles found in zip file')
|
|
||||||
|
|
||||||
zipsub.close()
|
|
||||||
except Exception as e:
|
|
||||||
if os.path.exists(subtitle.path):
|
|
||||||
os.remove(subtitle.path)
|
|
||||||
raise DownloadFailedError(str(e))
|
|
||||||
|
|
||||||
logger.debug(u'Download finished')
|
|
||||||
|
|
||||||
Service = Itasa
|
|
|
@ -27,12 +27,11 @@ import os.path
|
||||||
import xmlrpclib
|
import xmlrpclib
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class OpenSubtitles(ServiceBase):
|
class OpenSubtitles(ServiceBase):
|
||||||
server_url = 'http://api.opensubtitles.org/xml-rpc'
|
server_url = 'http://api.opensubtitles.org/xml-rpc'
|
||||||
site_url = 'http://www.opensubtitles.org'
|
|
||||||
api_based = True
|
api_based = True
|
||||||
# Source: http://www.opensubtitles.org/addons/export_languages.php
|
# Source: http://www.opensubtitles.org/addons/export_languages.php
|
||||||
languages = language_set(['aar', 'abk', 'ace', 'ach', 'ada', 'ady', 'afa', 'afh', 'afr', 'ain', 'aka', 'akk',
|
languages = language_set(['aar', 'abk', 'ace', 'ach', 'ada', 'ady', 'afa', 'afh', 'afr', 'ain', 'aka', 'akk',
|
||||||
|
@ -74,9 +73,9 @@ class OpenSubtitles(ServiceBase):
|
||||||
'twi', 'tyv', 'udm', 'uga', 'uig', 'ukr', 'umb', 'urd', 'uzb', 'vai', 'ven', 'vie',
|
'twi', 'tyv', 'udm', 'uga', 'uig', 'ukr', 'umb', 'urd', 'uzb', 'vai', 'ven', 'vie',
|
||||||
'vol', 'vot', 'wak', 'wal', 'war', 'was', 'wel', 'wen', 'wln', 'wol', 'xal', 'xho',
|
'vol', 'vot', 'wak', 'wal', 'war', 'was', 'wel', 'wen', 'wln', 'wol', 'xal', 'xho',
|
||||||
'yao', 'yap', 'yid', 'yor', 'ypk', 'zap', 'zen', 'zha', 'znd', 'zul', 'zun',
|
'yao', 'yap', 'yid', 'yor', 'ypk', 'zap', 'zen', 'zha', 'znd', 'zul', 'zun',
|
||||||
'pob', 'rum-MD'])
|
'por-BR', 'rum-MD'])
|
||||||
language_map = {'mol': Language('rum-MD'), 'scc': Language('srp'),
|
language_map = {'mol': Language('rum-MD'), 'scc': Language('srp'), 'pob': Language('por-BR'),
|
||||||
Language('rum-MD'): 'mol', Language('srp'): 'scc'}
|
Language('rum-MD'): 'mol', Language('srp'): 'scc', Language('por-BR'): 'pob'}
|
||||||
language_code = 'alpha3'
|
language_code = 'alpha3'
|
||||||
videos = [Episode, Movie]
|
videos = [Episode, Movie]
|
||||||
require_video = False
|
require_video = False
|
||||||
|
|
|
@ -26,21 +26,20 @@ import logging
|
||||||
import xmlrpclib
|
import xmlrpclib
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Podnapisi(ServiceBase):
|
class Podnapisi(ServiceBase):
|
||||||
server_url = 'http://ssp.podnapisi.net:8000'
|
server_url = 'http://ssp.podnapisi.net:8000'
|
||||||
site_url = 'http://www.podnapisi.net'
|
|
||||||
api_based = True
|
api_based = True
|
||||||
languages = language_set(['ar', 'be', 'bg', 'bs', 'ca', 'ca', 'cs', 'da', 'de', 'el', 'en',
|
languages = language_set(['ar', 'be', 'bg', 'bs', 'ca', 'ca', 'cs', 'da', 'de', 'el', 'en',
|
||||||
'es', 'et', 'fa', 'fi', 'fr', 'ga', 'he', 'hi', 'hr', 'hu', 'id',
|
'es', 'et', 'fa', 'fi', 'fr', 'ga', 'he', 'hi', 'hr', 'hu', 'id',
|
||||||
'is', 'it', 'ja', 'ko', 'lt', 'lv', 'mk', 'ms', 'nl', 'nn', 'pl',
|
'is', 'it', 'ja', 'ko', 'lt', 'lv', 'mk', 'ms', 'nl', 'nn', 'pl',
|
||||||
'pt', 'ro', 'ru', 'sk', 'sl', 'sq', 'sr', 'sv', 'th', 'tr', 'uk',
|
'pt', 'ro', 'ru', 'sk', 'sl', 'sq', 'sr', 'sv', 'th', 'tr', 'uk',
|
||||||
'vi', 'zh', 'es-ar', 'pb'])
|
'vi', 'zh', 'es-ar', 'pt-br'])
|
||||||
language_map = {'jp': Language('jpn'), Language('jpn'): 'jp',
|
language_map = {'jp': Language('jpn'), Language('jpn'): 'jp',
|
||||||
'gr': Language('gre'), Language('gre'): 'gr',
|
'gr': Language('gre'), Language('gre'): 'gr',
|
||||||
# 'pb': Language('por-BR'), Language('por-BR'): 'pb',
|
'pb': Language('por-BR'), Language('por-BR'): 'pb',
|
||||||
'ag': Language('spa-AR'), Language('spa-AR'): 'ag',
|
'ag': Language('spa-AR'), Language('spa-AR'): 'ag',
|
||||||
'cyr': Language('srp')}
|
'cyr': Language('srp')}
|
||||||
videos = [Episode, Movie]
|
videos = [Episode, Movie]
|
||||||
|
|
|
@ -1,124 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
|
|
||||||
#
|
|
||||||
# This file is part of subliminal.
|
|
||||||
#
|
|
||||||
# subliminal is free software; you can redistribute it and/or modify it under
|
|
||||||
# the terms of the GNU Lesser General Public License as published by
|
|
||||||
# the Free Software Foundation; either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# subliminal is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
from . import ServiceBase
|
|
||||||
from ..exceptions import DownloadFailedError
|
|
||||||
from ..language import Language, language_set
|
|
||||||
from ..subtitles import ResultSubtitle
|
|
||||||
from ..utils import get_keywords
|
|
||||||
from ..videos import Episode, Movie
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
import guessit
|
|
||||||
import logging
|
|
||||||
import re
|
|
||||||
from subliminal.subtitles import get_subtitle_path
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
|
||||||
|
|
||||||
|
|
||||||
class PodnapisiWeb(ServiceBase):
|
|
||||||
server_url = 'http://simple.podnapisi.net'
|
|
||||||
site_url = 'http://www.podnapisi.net'
|
|
||||||
api_based = True
|
|
||||||
user_agent = 'Subliminal/0.6'
|
|
||||||
videos = [Episode, Movie]
|
|
||||||
require_video = False
|
|
||||||
required_features = ['xml']
|
|
||||||
languages = language_set(['Albanian', 'Arabic', 'Spanish (Argentina)', 'Belarusian', 'Bosnian', 'Portuguese (Brazil)', 'Bulgarian', 'Catalan',
|
|
||||||
'Chinese', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Estonian', 'Persian',
|
|
||||||
'Finnish', 'French', 'German', 'gre', 'Kalaallisut', 'Hebrew', 'Hindi', 'Hungarian',
|
|
||||||
'Icelandic', 'Indonesian', 'Irish', 'Italian', 'Japanese', 'Kazakh', 'Korean', 'Latvian',
|
|
||||||
'Lithuanian', 'Macedonian', 'Malay', 'Norwegian', 'Polish', 'Portuguese', 'Romanian',
|
|
||||||
'Russian', 'Serbian', 'Sinhala', 'Slovak', 'Slovenian', 'Spanish', 'Swedish', 'Thai',
|
|
||||||
'Turkish', 'Ukrainian', 'Vietnamese'])
|
|
||||||
language_map = {Language('Albanian'): 29, Language('Arabic'): 12, Language('Spanish (Argentina)'): 14, Language('Belarusian'): 50,
|
|
||||||
Language('Bosnian'): 10, Language('Portuguese (Brazil)'): 48, Language('Bulgarian'): 33, Language('Catalan'): 53,
|
|
||||||
Language('Chinese'): 17, Language('Croatian'): 38, Language('Czech'): 7, Language('Danish'): 24,
|
|
||||||
Language('Dutch'): 23, Language('English'): 2, Language('Estonian'): 20, Language('Persian'): 52,
|
|
||||||
Language('Finnish'): 31, Language('French'): 8, Language('German'): 5, Language('gre'): 16,
|
|
||||||
Language('Kalaallisut'): 57, Language('Hebrew'): 22, Language('Hindi'): 42, Language('Hungarian'): 15,
|
|
||||||
Language('Icelandic'): 6, Language('Indonesian'): 54, Language('Irish'): 49, Language('Italian'): 9,
|
|
||||||
Language('Japanese'): 11, Language('Kazakh'): 58, Language('Korean'): 4, Language('Latvian'): 21,
|
|
||||||
Language('Lithuanian'): 19, Language('Macedonian'): 35, Language('Malay'): 55,
|
|
||||||
Language('Norwegian'): 3, Language('Polish'): 26, Language('Portuguese'): 32, Language('Romanian'): 13,
|
|
||||||
Language('Russian'): 27, Language('Serbian'): 36, Language('Sinhala'): 56, Language('Slovak'): 37,
|
|
||||||
Language('Slovenian'): 1, Language('Spanish'): 28, Language('Swedish'): 25, Language('Thai'): 44,
|
|
||||||
Language('Turkish'): 30, Language('Ukrainian'): 46, Language('Vietnamese'): 51,
|
|
||||||
29: Language('Albanian'), 12: Language('Arabic'), 14: Language('Spanish (Argentina)'), 50: Language('Belarusian'),
|
|
||||||
10: Language('Bosnian'), 48: Language('Portuguese (Brazil)'), 33: Language('Bulgarian'), 53: Language('Catalan'),
|
|
||||||
17: Language('Chinese'), 38: Language('Croatian'), 7: Language('Czech'), 24: Language('Danish'),
|
|
||||||
23: Language('Dutch'), 2: Language('English'), 20: Language('Estonian'), 52: Language('Persian'),
|
|
||||||
31: Language('Finnish'), 8: Language('French'), 5: Language('German'), 16: Language('gre'),
|
|
||||||
57: Language('Kalaallisut'), 22: Language('Hebrew'), 42: Language('Hindi'), 15: Language('Hungarian'),
|
|
||||||
6: Language('Icelandic'), 54: Language('Indonesian'), 49: Language('Irish'), 9: Language('Italian'),
|
|
||||||
11: Language('Japanese'), 58: Language('Kazakh'), 4: Language('Korean'), 21: Language('Latvian'),
|
|
||||||
19: Language('Lithuanian'), 35: Language('Macedonian'), 55: Language('Malay'), 40: Language('Chinese'),
|
|
||||||
3: Language('Norwegian'), 26: Language('Polish'), 32: Language('Portuguese'), 13: Language('Romanian'),
|
|
||||||
27: Language('Russian'), 36: Language('Serbian'), 47: Language('Serbian'), 56: Language('Sinhala'),
|
|
||||||
37: Language('Slovak'), 1: Language('Slovenian'), 28: Language('Spanish'), 25: Language('Swedish'),
|
|
||||||
44: Language('Thai'), 30: Language('Turkish'), 46: Language('Ukrainian'), Language('Vietnamese'): 51}
|
|
||||||
|
|
||||||
def list_checked(self, video, languages):
|
|
||||||
if isinstance(video, Movie):
|
|
||||||
return self.query(video.path or video.release, languages, video.title, year=video.year,
|
|
||||||
keywords=get_keywords(video.guess))
|
|
||||||
if isinstance(video, Episode):
|
|
||||||
return self.query(video.path or video.release, languages, video.series, season=video.season,
|
|
||||||
episode=video.episode, keywords=get_keywords(video.guess))
|
|
||||||
|
|
||||||
def query(self, filepath, languages, title, season=None, episode=None, year=None, keywords=None):
|
|
||||||
params = {'sXML': 1, 'sK': title, 'sJ': ','.join([str(self.get_code(l)) for l in languages])}
|
|
||||||
if season is not None:
|
|
||||||
params['sTS'] = season
|
|
||||||
if episode is not None:
|
|
||||||
params['sTE'] = episode
|
|
||||||
if year is not None:
|
|
||||||
params['sY'] = year
|
|
||||||
if keywords is not None:
|
|
||||||
params['sR'] = keywords
|
|
||||||
r = self.session.get(self.server_url + '/ppodnapisi/search', params=params)
|
|
||||||
if r.status_code != 200:
|
|
||||||
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
|
|
||||||
return []
|
|
||||||
subtitles = []
|
|
||||||
soup = BeautifulSoup(r.content, self.required_features)
|
|
||||||
for sub in soup('subtitle'):
|
|
||||||
if 'n' in sub.flags:
|
|
||||||
logger.debug(u'Skipping hearing impaired')
|
|
||||||
continue
|
|
||||||
language = self.get_language(sub.languageId.text)
|
|
||||||
confidence = float(sub.rating.text) / 5.0
|
|
||||||
sub_keywords = set()
|
|
||||||
for release in sub.release.text.split():
|
|
||||||
sub_keywords |= get_keywords(guessit.guess_file_info(release + '.srt', 'autodetect'))
|
|
||||||
sub_path = get_subtitle_path(filepath, language, self.config.multi)
|
|
||||||
subtitle = ResultSubtitle(sub_path, language, self.__class__.__name__.lower(),
|
|
||||||
sub.url.text, confidence=confidence, keywords=sub_keywords)
|
|
||||||
subtitles.append(subtitle)
|
|
||||||
return subtitles
|
|
||||||
|
|
||||||
def download(self, subtitle):
|
|
||||||
r = self.session.get(subtitle.link)
|
|
||||||
if r.status_code != 200:
|
|
||||||
raise DownloadFailedError()
|
|
||||||
soup = BeautifulSoup(r.content)
|
|
||||||
self.download_zip_file(self.server_url + soup.find('a', href=re.compile('download'))['href'], subtitle.path)
|
|
||||||
return subtitle
|
|
||||||
|
|
||||||
|
|
||||||
Service = PodnapisiWeb
|
|
|
@ -26,16 +26,15 @@ import logging
|
||||||
import urllib
|
import urllib
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class SubsWiki(ServiceBase):
|
class SubsWiki(ServiceBase):
|
||||||
server_url = 'http://www.subswiki.com'
|
server_url = 'http://www.subswiki.com'
|
||||||
site_url = 'http://www.subswiki.com'
|
|
||||||
api_based = False
|
api_based = False
|
||||||
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'pob', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
|
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
|
||||||
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'),
|
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'),
|
||||||
u'Català': Language('cat'), u'Brazilian': Language('pob'), u'English (US)': Language('eng-US'),
|
u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'),
|
||||||
u'English (UK)': Language('eng-GB')}
|
u'English (UK)': Language('eng-GB')}
|
||||||
language_code = 'name'
|
language_code = 'name'
|
||||||
videos = [Episode, Movie]
|
videos = [Episode, Movie]
|
||||||
|
@ -78,7 +77,7 @@ class SubsWiki(ServiceBase):
|
||||||
subtitles = []
|
subtitles = []
|
||||||
for sub in soup('td', {'class': 'NewsTitle'}):
|
for sub in soup('td', {'class': 'NewsTitle'}):
|
||||||
sub_keywords = split_keyword(sub.b.string.lower())
|
sub_keywords = split_keyword(sub.b.string.lower())
|
||||||
if keywords and not keywords & sub_keywords:
|
if not keywords & sub_keywords:
|
||||||
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
|
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
|
||||||
continue
|
continue
|
||||||
for html_language in sub.parent.parent.find_all('td', {'class': 'language'}):
|
for html_language in sub.parent.parent.find_all('td', {'class': 'language'}):
|
||||||
|
|
|
@ -27,16 +27,15 @@ import unicodedata
|
||||||
import urllib
|
import urllib
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Subtitulos(ServiceBase):
|
class Subtitulos(ServiceBase):
|
||||||
server_url = 'http://www.subtitulos.es'
|
server_url = 'http://www.subtitulos.es'
|
||||||
site_url = 'http://www.subtitulos.es'
|
|
||||||
api_based = False
|
api_based = False
|
||||||
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'pob', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
|
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
|
||||||
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), #u'Español (Latinoamérica)': Language('spa'),
|
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'),
|
||||||
u'Català': Language('cat'), u'Brazilian': Language('pob'), u'English (US)': Language('eng-US'),
|
u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'),
|
||||||
u'English (UK)': Language('eng-GB'), 'Galego': Language('glg')}
|
u'English (UK)': Language('eng-GB'), 'Galego': Language('glg')}
|
||||||
language_code = 'name'
|
language_code = 'name'
|
||||||
videos = [Episode]
|
videos = [Episode]
|
||||||
|
@ -46,13 +45,12 @@ class Subtitulos(ServiceBase):
|
||||||
# and the 'ó' char directly. This is because now BS4 converts the html
|
# and the 'ó' char directly. This is because now BS4 converts the html
|
||||||
# code chars into their equivalent unicode char
|
# code chars into their equivalent unicode char
|
||||||
release_pattern = re.compile('Versi.+n (.+) ([0-9]+).([0-9])+ megabytes')
|
release_pattern = re.compile('Versi.+n (.+) ([0-9]+).([0-9])+ megabytes')
|
||||||
extra_keywords_pattern = re.compile("(?:con|para)\s(?:720p)?(?:\-|\s)?([A-Za-z]+)(?:\-|\s)?(?:720p)?(?:\s|\.)(?:y\s)?(?:720p)?(?:\-\s)?([A-Za-z]+)?(?:\-\s)?(?:720p)?(?:\.)?");
|
|
||||||
|
|
||||||
def list_checked(self, video, languages):
|
def list_checked(self, video, languages):
|
||||||
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
|
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
|
||||||
|
|
||||||
def query(self, filepath, languages, keywords, series, season, episode):
|
def query(self, filepath, languages, keywords, series, season, episode):
|
||||||
request_series = series.lower().replace(' ', '-').replace('&', '@').replace('(','').replace(')','')
|
request_series = series.lower().replace(' ', '_')
|
||||||
if isinstance(request_series, unicode):
|
if isinstance(request_series, unicode):
|
||||||
request_series = unicodedata.normalize('NFKD', request_series).encode('ascii', 'ignore')
|
request_series = unicodedata.normalize('NFKD', request_series).encode('ascii', 'ignore')
|
||||||
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
|
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
|
||||||
|
@ -67,7 +65,7 @@ class Subtitulos(ServiceBase):
|
||||||
subtitles = []
|
subtitles = []
|
||||||
for sub in soup('div', {'id': 'version'}):
|
for sub in soup('div', {'id': 'version'}):
|
||||||
sub_keywords = split_keyword(self.release_pattern.search(sub.find('p', {'class': 'title-sub'}).contents[1]).group(1).lower())
|
sub_keywords = split_keyword(self.release_pattern.search(sub.find('p', {'class': 'title-sub'}).contents[1]).group(1).lower())
|
||||||
if keywords and not keywords & sub_keywords:
|
if not keywords & sub_keywords:
|
||||||
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
|
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
|
||||||
continue
|
continue
|
||||||
for html_language in sub.findAllNext('ul', {'class': 'sslist'}):
|
for html_language in sub.findAllNext('ul', {'class': 'sslist'}):
|
||||||
|
|
|
@ -16,23 +16,22 @@
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
||||||
from . import ServiceBase
|
from . import ServiceBase
|
||||||
from ..language import language_set, Language
|
from ..language import language_set
|
||||||
from ..subtitles import get_subtitle_path, ResultSubtitle
|
from ..subtitles import get_subtitle_path, ResultSubtitle
|
||||||
from ..videos import Episode, Movie, UnknownVideo
|
from ..videos import Episode, Movie, UnknownVideo
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class TheSubDB(ServiceBase):
|
class TheSubDB(ServiceBase):
|
||||||
server_url = 'http://api.thesubdb.com'
|
server_url = 'http://api.thesubdb.com'
|
||||||
site_url = 'http://www.thesubdb.com/'
|
|
||||||
user_agent = 'SubDB/1.0 (subliminal/0.6; https://github.com/Diaoul/subliminal)'
|
user_agent = 'SubDB/1.0 (subliminal/0.6; https://github.com/Diaoul/subliminal)'
|
||||||
api_based = True
|
api_based = True
|
||||||
# Source: http://api.thesubdb.com/?action=languages
|
# Source: http://api.thesubdb.com/?action=languages
|
||||||
languages = language_set(['af', 'cs', 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'id', 'it',
|
languages = language_set(['af', 'cs', 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'id', 'it',
|
||||||
'la', 'nl', 'no', 'oc', 'pl', 'pb', 'ro', 'ru', 'sl', 'sr', 'sv',
|
'la', 'nl', 'no', 'oc', 'pl', 'pt', 'ro', 'ru', 'sl', 'sr', 'sv',
|
||||||
'tr'])
|
'tr'])
|
||||||
videos = [Movie, Episode, UnknownVideo]
|
videos = [Movie, Episode, UnknownVideo]
|
||||||
require_video = True
|
require_video = True
|
||||||
|
@ -49,10 +48,6 @@ class TheSubDB(ServiceBase):
|
||||||
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
|
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
|
||||||
return []
|
return []
|
||||||
available_languages = language_set(r.content.split(','))
|
available_languages = language_set(r.content.split(','))
|
||||||
#this is needed becase for theSubDB pt languages is Portoguese Brazil and not Portoguese#
|
|
||||||
#So we are deleting pt language and adding pb language
|
|
||||||
if Language('pt') in available_languages:
|
|
||||||
available_languages = available_languages - language_set(['pt']) | language_set(['pb'])
|
|
||||||
languages &= available_languages
|
languages &= available_languages
|
||||||
if not languages:
|
if not languages:
|
||||||
logger.debug(u'Could not find subtitles for hash %s with languages %r (only %r available)' % (moviehash, languages, available_languages))
|
logger.debug(u'Could not find subtitles for hash %s with languages %r (only %r available)' % (moviehash, languages, available_languages))
|
||||||
|
|
|
@ -26,7 +26,7 @@ import logging
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def match(pattern, string):
|
def match(pattern, string):
|
||||||
|
@ -39,14 +39,13 @@ def match(pattern, string):
|
||||||
|
|
||||||
class TvSubtitles(ServiceBase):
|
class TvSubtitles(ServiceBase):
|
||||||
server_url = 'http://www.tvsubtitles.net'
|
server_url = 'http://www.tvsubtitles.net'
|
||||||
site_url = 'http://www.tvsubtitles.net'
|
|
||||||
api_based = False
|
api_based = False
|
||||||
languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu',
|
languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu',
|
||||||
'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk',
|
'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk',
|
||||||
'zh', 'pb'])
|
'zh', 'pt-br'])
|
||||||
#TODO: Find more exceptions
|
#TODO: Find more exceptions
|
||||||
language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'),
|
language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'),
|
||||||
'cn': Language('chi'), 'br': Language('pob')}
|
'cn': Language('chi')}
|
||||||
videos = [Episode]
|
videos = [Episode]
|
||||||
require_video = False
|
require_video = False
|
||||||
required_features = ['permissive']
|
required_features = ['permissive']
|
||||||
|
|
|
@ -1,99 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright 2013 Julien Goret <jgoret@gmail.com>
|
|
||||||
#
|
|
||||||
# This file is part of subliminal.
|
|
||||||
#
|
|
||||||
# subliminal is free software; you can redistribute it and/or modify it under
|
|
||||||
# the terms of the GNU Lesser General Public License as published by
|
|
||||||
# the Free Software Foundation; either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# subliminal is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
from . import ServiceBase
|
|
||||||
from ..exceptions import ServiceError
|
|
||||||
from ..language import language_set, Language
|
|
||||||
from ..subtitles import get_subtitle_path, ResultSubtitle
|
|
||||||
from ..utils import get_keywords, split_keyword
|
|
||||||
from ..videos import Episode
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
import logging
|
|
||||||
import urllib
|
|
||||||
|
|
||||||
logger = logging.getLogger("subliminal")
|
|
||||||
|
|
||||||
class Usub(ServiceBase):
|
|
||||||
server_url = 'http://www.u-sub.net/sous-titres'
|
|
||||||
site_url = 'http://www.u-sub.net/'
|
|
||||||
api_based = False
|
|
||||||
languages = language_set(['fr'])
|
|
||||||
videos = [Episode]
|
|
||||||
require_video = False
|
|
||||||
#required_features = ['permissive']
|
|
||||||
|
|
||||||
def list_checked(self, video, languages):
|
|
||||||
return self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode)
|
|
||||||
|
|
||||||
def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None):
|
|
||||||
|
|
||||||
## Check if we really got informations about our episode
|
|
||||||
if series and season and episode:
|
|
||||||
request_series = series.lower().replace(' ', '-')
|
|
||||||
if isinstance(request_series, unicode):
|
|
||||||
request_series = request_series.encode('utf-8')
|
|
||||||
logger.debug(u'Getting subtitles for %s season %d episode %d with language %r' % (series, season, episode, languages))
|
|
||||||
r = self.session.get('%s/%s/saison_%s' % (self.server_url, urllib.quote(request_series),season))
|
|
||||||
if r.status_code == 404:
|
|
||||||
print "Error 404"
|
|
||||||
logger.debug(u'Could not find subtitles for %s' % (series))
|
|
||||||
return []
|
|
||||||
else:
|
|
||||||
print "One or more parameter missing"
|
|
||||||
raise ServiceError('One or more parameter missing')
|
|
||||||
|
|
||||||
## Check if we didn't got an big and nasty http error
|
|
||||||
if r.status_code != 200:
|
|
||||||
print u'Request %s returned status code %d' % (r.url, r.status_code)
|
|
||||||
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
|
|
||||||
return []
|
|
||||||
|
|
||||||
## Editing episode informations to be able to use it with our search
|
|
||||||
if episode < 10 :
|
|
||||||
episode_num='0'+str(episode)
|
|
||||||
else :
|
|
||||||
episode_num=str(episode)
|
|
||||||
season_num = str(season)
|
|
||||||
series_name = series.lower().replace(' ', '.')
|
|
||||||
possible_episode_naming = [season_num+'x'+episode_num,season_num+episode_num]
|
|
||||||
|
|
||||||
|
|
||||||
## Actually parsing the page for the good subtitles
|
|
||||||
soup = BeautifulSoup(r.content, self.required_features)
|
|
||||||
subtitles = []
|
|
||||||
subtitles_list = soup.find('table', {'id' : 'subtitles_list'})
|
|
||||||
link_list = subtitles_list.findAll('a', {'class' : 'dl_link'})
|
|
||||||
|
|
||||||
for link in link_list :
|
|
||||||
link_url = link.get('href')
|
|
||||||
splited_link = link_url.split('/')
|
|
||||||
filename = splited_link[len(splited_link)-1]
|
|
||||||
for episode_naming in possible_episode_naming :
|
|
||||||
if episode_naming in filename :
|
|
||||||
for language in languages:
|
|
||||||
path = get_subtitle_path(filepath, language, self.config.multi)
|
|
||||||
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s' % (link_url))
|
|
||||||
subtitles.append(subtitle)
|
|
||||||
return subtitles
|
|
||||||
|
|
||||||
def download(self, subtitle):
|
|
||||||
## All downloaded files are zip files
|
|
||||||
self.download_zip_file(subtitle.link, subtitle.path)
|
|
||||||
return subtitle
|
|
||||||
|
|
||||||
|
|
||||||
Service = Usub
|
|
|
@ -26,13 +26,10 @@ import mimetypes
|
||||||
import os
|
import os
|
||||||
import struct
|
import struct
|
||||||
|
|
||||||
from sickbeard import encodingKludge as ek
|
|
||||||
import sickbeard
|
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['EXTENSIONS', 'MIMETYPES', 'Video', 'Episode', 'Movie', 'UnknownVideo',
|
__all__ = ['EXTENSIONS', 'MIMETYPES', 'Video', 'Episode', 'Movie', 'UnknownVideo',
|
||||||
'scan', 'hash_opensubtitles', 'hash_thesubdb']
|
'scan', 'hash_opensubtitles', 'hash_thesubdb']
|
||||||
logger = logging.getLogger("subliminal")
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
#: Video extensions
|
#: Video extensions
|
||||||
EXTENSIONS = ['.avi', '.mkv', '.mpg', '.mp4', '.m4v', '.mov', '.ogm', '.ogv', '.wmv',
|
EXTENSIONS = ['.avi', '.mkv', '.mpg', '.mp4', '.m4v', '.mov', '.ogm', '.ogv', '.wmv',
|
||||||
|
@ -58,10 +55,6 @@ class Video(object):
|
||||||
self.imdbid = imdbid
|
self.imdbid = imdbid
|
||||||
self._path = None
|
self._path = None
|
||||||
self.hashes = {}
|
self.hashes = {}
|
||||||
|
|
||||||
if isinstance(path, unicode):
|
|
||||||
path = path.encode('utf-8')
|
|
||||||
|
|
||||||
if os.path.exists(path):
|
if os.path.exists(path):
|
||||||
self._path = path
|
self._path = path
|
||||||
self.size = os.path.getsize(self._path)
|
self.size = os.path.getsize(self._path)
|
||||||
|
@ -145,10 +138,6 @@ class Video(object):
|
||||||
if folder == '':
|
if folder == '':
|
||||||
folder = '.'
|
folder = '.'
|
||||||
existing = [f for f in os.listdir(folder) if f.startswith(basename)]
|
existing = [f for f in os.listdir(folder) if f.startswith(basename)]
|
||||||
if sickbeard.SUBTITLES_DIR:
|
|
||||||
subsDir = ek.ek(os.path.join, folder, sickbeard.SUBTITLES_DIR)
|
|
||||||
if ek.ek(os.path.isdir, subsDir):
|
|
||||||
existing.extend([f for f in os.listdir(subsDir) if f.startswith(basename)])
|
|
||||||
for path in existing:
|
for path in existing:
|
||||||
for ext in subtitles.EXTENSIONS:
|
for ext in subtitles.EXTENSIONS:
|
||||||
if path.endswith(ext):
|
if path.endswith(ext):
|
||||||
|
@ -225,9 +214,6 @@ def scan(entry, max_depth=3, scan_filter=None, depth=0):
|
||||||
:rtype: list of (:class:`Video`, [:class:`~subliminal.subtitles.Subtitle`])
|
:rtype: list of (:class:`Video`, [:class:`~subliminal.subtitles.Subtitle`])
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if isinstance(entry, unicode):
|
|
||||||
entry = entry.encode('utf-8')
|
|
||||||
|
|
||||||
if depth > max_depth and max_depth != 0: # we do not want to search the whole file system except if max_depth = 0
|
if depth > max_depth and max_depth != 0: # we do not want to search the whole file system except if max_depth = 0
|
||||||
return []
|
return []
|
||||||
if os.path.isdir(entry): # a dir? recurse
|
if os.path.isdir(entry): # a dir? recurse
|
||||||
|
|
|
@ -408,11 +408,10 @@ def symlink(src, dst):
|
||||||
if os.name == 'nt':
|
if os.name == 'nt':
|
||||||
import ctypes
|
import ctypes
|
||||||
|
|
||||||
if ctypes.windll.kernel32.CreateSymbolicLinkW(unicode(dst), unicode(src), 1 if os.path.isdir(src) else 0) in [0,
|
if ctypes.windll.kernel32.CreateSymbolicLinkW(unicode(dst), unicode(src), 1 if os.path.isdir(src) else 0) in [0,1280]:
|
||||||
1280]: raise ctypes.WinError()
|
raise ctypes.WinError()
|
||||||
else:
|
else:
|
||||||
os.symlink(src, dst)
|
os.symlink(src, dst)
|
||||||
|
|
||||||
|
|
||||||
def moveAndSymlinkFile(srcFile, destFile):
|
def moveAndSymlinkFile(srcFile, destFile):
|
||||||
try:
|
try:
|
||||||
|
|
|
@ -31,7 +31,7 @@ SINGLE = 'und'
|
||||||
|
|
||||||
|
|
||||||
def sortedServiceList():
|
def sortedServiceList():
|
||||||
servicesMapping = dict([(x.lower(), x) for x in subliminal.core.SERVICES])
|
servicesMapping = dict([(x.lower(), x) for x in subliminal.Subtitle.core.Providers])
|
||||||
|
|
||||||
newList = []
|
newList = []
|
||||||
|
|
||||||
|
|
|
@ -1194,8 +1194,10 @@ class TVEpisode(object):
|
||||||
previous_subtitles = self.subtitles
|
previous_subtitles = self.subtitles
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
subliminal.cache_region.configure('dogpile.cache.dbm', arguments={'filename': os.path.join(sickbeard.CACHE_DIR, '/path/to/cachefile.dbm')})
|
||||||
|
videos = subliminal.scan_videos([self.location], subtitles=True, embedded_subtitles=True)
|
||||||
need_languages = set(sickbeard.SUBTITLES_LANGUAGES) - set(self.subtitles)
|
need_languages = set(sickbeard.SUBTITLES_LANGUAGES) - set(self.subtitles)
|
||||||
subtitles = subliminal.download_subtitles([self.location], languages=need_languages,
|
subtitles = subliminal.download_best_subtitles([self.location], languages=need_languages,
|
||||||
services=sickbeard.subtitles.getEnabledServiceList(), force=force,
|
services=sickbeard.subtitles.getEnabledServiceList(), force=force,
|
||||||
multi=True, cache_dir=sickbeard.CACHE_DIR)
|
multi=True, cache_dir=sickbeard.CACHE_DIR)
|
||||||
|
|
||||||
|
|
|
@ -1545,8 +1545,8 @@ class CMD_SickBeardRestart(ApiCall):
|
||||||
return _responds(RESULT_SUCCESS, msg="SickBeard is restarting...")
|
return _responds(RESULT_SUCCESS, msg="SickBeard is restarting...")
|
||||||
|
|
||||||
|
|
||||||
class CMD_SickBeardSearchTVDB(ApiCall):
|
class CMD_SickBeardSearchIndexers(ApiCall):
|
||||||
_help = {"desc": "search for show at tvdb with a given string and language",
|
_help = {"desc": "search for show on the indexers with a given string and language",
|
||||||
"optionalParameters": {"name": {"desc": "name of the show you want to search for"},
|
"optionalParameters": {"name": {"desc": "name of the show you want to search for"},
|
||||||
"indexerid": {"desc": "thetvdb.com unique id of a show"},
|
"indexerid": {"desc": "thetvdb.com unique id of a show"},
|
||||||
"lang": {"desc": "the 2 letter abbreviation lang id"}
|
"lang": {"desc": "the 2 letter abbreviation lang id"}
|
||||||
|
@ -1820,17 +1820,17 @@ class CMD_ShowAddExisting(ApiCall):
|
||||||
if not ek.ek(os.path.isdir, self.location):
|
if not ek.ek(os.path.isdir, self.location):
|
||||||
return _responds(RESULT_FAILURE, msg='Not a valid location')
|
return _responds(RESULT_FAILURE, msg='Not a valid location')
|
||||||
|
|
||||||
tvdbName = None
|
indexerName = None
|
||||||
tvdbResult = CMD_SickBeardSearchTVDB([], {"indexerid": self.indexerid}).run()
|
indexerResult = CMD_SickBeardSearchIndexers([], {"indexerid": self.indexerid}).run()
|
||||||
|
|
||||||
if tvdbResult['result'] == result_type_map[RESULT_SUCCESS]:
|
if indexerResult['result'] == result_type_map[RESULT_SUCCESS]:
|
||||||
if not tvdbResult['data']['results']:
|
if not indexerResult['data']['results']:
|
||||||
return _responds(RESULT_FAILURE, msg="Empty results returned, check indexerid and try again")
|
return _responds(RESULT_FAILURE, msg="Empty results returned, check indexerid and try again")
|
||||||
if len(tvdbResult['data']['results']) == 1 and 'name' in tvdbResult['data']['results'][0]:
|
if len(indexerResult['data']['results']) == 1 and 'name' in indexerResult['data']['results'][0]:
|
||||||
tvdbName = tvdbResult['data']['results'][0]['name']
|
indexerName = indexerResult['data']['results'][0]['name']
|
||||||
|
|
||||||
if not tvdbName:
|
if not indexerName:
|
||||||
return _responds(RESULT_FAILURE, msg="Unable to retrieve information from tvdb")
|
return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer")
|
||||||
|
|
||||||
quality_map = {'sdtv': Quality.SDTV,
|
quality_map = {'sdtv': Quality.SDTV,
|
||||||
'sddvd': Quality.SDDVD,
|
'sddvd': Quality.SDDVD,
|
||||||
|
@ -1860,12 +1860,12 @@ class CMD_ShowAddExisting(ApiCall):
|
||||||
|
|
||||||
sickbeard.showQueueScheduler.action.addShow(int(self.indexerid), self.location, SKIPPED, newQuality,
|
sickbeard.showQueueScheduler.action.addShow(int(self.indexerid), self.location, SKIPPED, newQuality,
|
||||||
int(self.flatten_folders)) #@UndefinedVariable
|
int(self.flatten_folders)) #@UndefinedVariable
|
||||||
return _responds(RESULT_SUCCESS, {"name": tvdbName}, tvdbName + " has been queued to be added")
|
return _responds(RESULT_SUCCESS, {"name": indexerName}, indexerName + " has been queued to be added")
|
||||||
|
|
||||||
|
|
||||||
class CMD_ShowAddNew(ApiCall):
|
class CMD_ShowAddNew(ApiCall):
|
||||||
_help = {"desc": "add a new show to sickbeard",
|
_help = {"desc": "add a new show to sickbeard",
|
||||||
"requiredParameters": {"indexerid": {"desc": "thetvdb.com unique id of a show"}
|
"requiredParameters": {"indexerid": {"desc": "thetvdb.com or tvrage.com unique id of a show"}
|
||||||
},
|
},
|
||||||
"optionalParameters": {"initial": {"desc": "initial quality for the show"},
|
"optionalParameters": {"initial": {"desc": "initial quality for the show"},
|
||||||
"location": {"desc": "base path for where the show folder is to be created"},
|
"location": {"desc": "base path for where the show folder is to be created"},
|
||||||
|
@ -1964,20 +1964,20 @@ class CMD_ShowAddNew(ApiCall):
|
||||||
return _responds(RESULT_FAILURE, msg="Status prohibited")
|
return _responds(RESULT_FAILURE, msg="Status prohibited")
|
||||||
newStatus = self.status
|
newStatus = self.status
|
||||||
|
|
||||||
tvdbName = None
|
indexerName = None
|
||||||
tvdbResult = CMD_SickBeardSearchTVDB([], {"indexerid": self.indexerid}).run()
|
indexerResult = CMD_SickBeardSearchTVDB([], {"indexerid": self.indexerid}).run()
|
||||||
|
|
||||||
if tvdbResult['result'] == result_type_map[RESULT_SUCCESS]:
|
if indexerResult['result'] == result_type_map[RESULT_SUCCESS]:
|
||||||
if not tvdbResult['data']['results']:
|
if not indexerResult['data']['results']:
|
||||||
return _responds(RESULT_FAILURE, msg="Empty results returned, check indexerid and try again")
|
return _responds(RESULT_FAILURE, msg="Empty results returned, check indexerid and try again")
|
||||||
if len(tvdbResult['data']['results']) == 1 and 'name' in tvdbResult['data']['results'][0]:
|
if len(indexerResult['data']['results']) == 1 and 'name' in indexerResult['data']['results'][0]:
|
||||||
tvdbName = tvdbResult['data']['results'][0]['name']
|
indexerName = indexerResult['data']['results'][0]['name']
|
||||||
|
|
||||||
if not tvdbName:
|
if not indexerName:
|
||||||
return _responds(RESULT_FAILURE, msg="Unable to retrieve information from tvdb")
|
return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer")
|
||||||
|
|
||||||
# moved the logic check to the end in an attempt to eliminate empty directory being created from previous errors
|
# moved the logic check to the end in an attempt to eliminate empty directory being created from previous errors
|
||||||
showPath = ek.ek(os.path.join, self.location, helpers.sanitizeFileName(tvdbName))
|
showPath = ek.ek(os.path.join, self.location, helpers.sanitizeFileName(indexerName))
|
||||||
|
|
||||||
# don't create show dir if config says not to
|
# don't create show dir if config says not to
|
||||||
if sickbeard.ADD_SHOWS_WO_DIR:
|
if sickbeard.ADD_SHOWS_WO_DIR:
|
||||||
|
@ -1994,7 +1994,7 @@ class CMD_ShowAddNew(ApiCall):
|
||||||
sickbeard.showQueueScheduler.action.addShow(int(self.indexerid), showPath, newStatus, newQuality,
|
sickbeard.showQueueScheduler.action.addShow(int(self.indexerid), showPath, newStatus, newQuality,
|
||||||
int(self.flatten_folders), self.subtitles,
|
int(self.flatten_folders), self.subtitles,
|
||||||
self.lang) #@UndefinedVariable
|
self.lang) #@UndefinedVariable
|
||||||
return _responds(RESULT_SUCCESS, {"name": tvdbName}, tvdbName + " has been queued to be added")
|
return _responds(RESULT_SUCCESS, {"name": indexerName}, indexerName + " has been queued to be added")
|
||||||
|
|
||||||
|
|
||||||
class CMD_ShowCache(ApiCall):
|
class CMD_ShowCache(ApiCall):
|
||||||
|
@ -2578,7 +2578,7 @@ _functionMaper = {"help": CMD_Help,
|
||||||
"sb.pausebacklog": CMD_SickBeardPauseBacklog,
|
"sb.pausebacklog": CMD_SickBeardPauseBacklog,
|
||||||
"sb.ping": CMD_SickBeardPing,
|
"sb.ping": CMD_SickBeardPing,
|
||||||
"sb.restart": CMD_SickBeardRestart,
|
"sb.restart": CMD_SickBeardRestart,
|
||||||
"sb.searchtvdb": CMD_SickBeardSearchTVDB,
|
"sb.searchtvdb": CMD_SickBeardSearchIndexers,
|
||||||
"sb.setdefaults": CMD_SickBeardSetDefaults,
|
"sb.setdefaults": CMD_SickBeardSetDefaults,
|
||||||
"sb.shutdown": CMD_SickBeardShutdown,
|
"sb.shutdown": CMD_SickBeardShutdown,
|
||||||
"show": CMD_Show,
|
"show": CMD_Show,
|
||||||
|
|
|
@ -2889,7 +2889,7 @@ class Home:
|
||||||
air_by_date = config.checkbox_to_value(air_by_date)
|
air_by_date = config.checkbox_to_value(air_by_date)
|
||||||
subtitles = config.checkbox_to_value(subtitles)
|
subtitles = config.checkbox_to_value(subtitles)
|
||||||
|
|
||||||
indexer_lang = showObj.lang
|
indexer_lang = indexerLang
|
||||||
|
|
||||||
# if we changed the language then kick off an update
|
# if we changed the language then kick off an update
|
||||||
if indexer_lang == showObj.lang:
|
if indexer_lang == showObj.lang:
|
||||||
|
|
Loading…
Reference in a new issue