This version of SickBeard uses both TVDB and TVRage to search and gather its series data, giving you access to shows you couldn't download before when locked into only what TheTVDB had to offer. This edition is also based on the code used in our XEM edition, so it comes with scene numbering support as well as all the other features the XEM edition offers. Before using this with your existing database (sickbeard.db), please make a backup copy of it and delete any other database files such as cache.db and failed.db if present. We HIGHLY recommend starting out with no database files at all to make this a fresh start, but the choice is at your own risk! Enjoy!
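
As one way to follow that advice, a minimal sketch (the data directory path is an assumption; adjust it to wherever your sickbeard.db actually lives):

    import os
    import shutil

    DATA_DIR = "/path/to/sickbeard"  # assumption: your SickBeard data directory

    # back up the main database before first run with this edition
    shutil.copy2(os.path.join(DATA_DIR, "sickbeard.db"),
                 os.path.join(DATA_DIR, "sickbeard.db.bak"))

    # remove the auxiliary databases if present
    for name in ("cache.db", "failed.db"):
        path = os.path.join(DATA_DIR, name)
        if os.path.isfile(path):
            os.remove(path)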
from lib.hachoir_core.field import Bytes
from lib.hachoir_core.tools import makePrintable, humanFilesize
from lib.hachoir_core.stream import InputIOStream


class SubFile(Bytes):
    """
    File stored in another file
    """
    def __init__(self, parent, name, length, description=None,
                 parser=None, filename=None, mime_type=None, parser_class=None):
        if filename:
            # sanitize byte-string filenames (Python 2 str) for display
            if not isinstance(filename, unicode):
                filename = makePrintable(filename, "ISO-8859-1")
            if not description:
                description = 'File "%s" (%s)' % (filename, humanFilesize(length))
        Bytes.__init__(self, parent, name, length, description)

        def createInputStream(cis, **args):
            # tag the substream so it can be routed to the right parser
            # (by parser class, parser id, MIME type or filename)
            tags = args.setdefault("tags", [])
            if parser_class:
                tags.append(("class", parser_class))
            if parser is not None:
                tags.append(("id", parser.PARSER_TAGS["id"]))
            if mime_type:
                tags.append(("mime", mime_type))
            if filename:
                tags.append(("filename", filename))
            return cis(**args)
        self.setSubIStream(createInputStream)

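For context, SubFile is normally yielded from a parser's createFields(); a hypothetical fragment (the field name, size variable and filename are illustrative, not from this file):

    # expose an embedded PNG as a sub-file so hachoir can parse it separately;
    # the length argument is a byte count, as in Bytes
    yield SubFile(self, "icon", icon_size,
                  filename="icon.png", mime_type="image/png")
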
class CompressedStream:
    offset = 0

    def __init__(self, stream, decompressor):
        self.stream = stream
        self.decompressor = decompressor(stream)
        self._buffer = ''

    def read(self, size):
        # serve what is left over from the previous read first, then
        # decompress more until `size` bytes are ready or input runs out
        d = self._buffer
        data = [d[:size]]
        size -= len(d)
        if size > 0:
            d = self.decompressor(size)
            data.append(d[:size])
            size -= len(d)
            while size > 0:
                n = 4096
                if self.stream.size:
                    n = min(self.stream.size - self.offset, n)
                    if not n:
                        break
                d = self.stream.read(self.offset, n)[1]
                self.offset += 8 * len(d)  # hachoir stream offsets are in bits
                d = self.decompressor(size, d)
                data.append(d[:size])
                size -= len(d)
        # keep any decompressed bytes beyond `size` for the next read
        self._buffer = d[size + len(d):]
        return ''.join(data)

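A decompressor handed to CompressedStream is a factory: it is constructed with the source stream, and the resulting object is called as obj(max_size) or obj(max_size, new_compressed_bytes) and returns decompressed bytes. A minimal zlib-based sketch of that interface (the raw-DEFLATE window setting is an assumption about the data; hachoir_parser ships a similar helper):

    import zlib

    class DeflateStream:
        def __init__(self, stream):
            # assumption: payload is raw DEFLATE data (no zlib header)
            self.obj = zlib.decompressobj(-15)

        def __call__(self, size, data=''):
            # feed input zlib held back on the last call plus the new chunk,
            # returning at most `size` decompressed bytes
            return self.obj.decompress(self.obj.unconsumed_tail + data, size)
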
def CompressedField(field, decompressor):
    def createInputStream(cis, source=None, **args):
        if field._parent:
            # reuse the parent's input stream and propagate its tags
            stream = cis(source=source)
            args.setdefault("tags", []).extend(stream.tags)
        else:
            stream = field.stream
        input = CompressedStream(stream, decompressor)
        if source is None:
            source = "Compressed source: '%s' (offset=%s)" % (stream.source, field.absolute_address)
        return InputIOStream(input, source=source, **args)
    field.setSubIStream(createInputStream)
    return field
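
Typical use wraps a field holding compressed bytes so that its substream yields the decompressed data, e.g. (hypothetical field name, DeflateStream from the sketch above):

    field = CompressedField(self["compressed_data"], DeflateStream)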