mirror of https://github.com/SickGear/SickGear.git
synced 2024-12-04 18:33:38 +00:00
980e05cc99
Backported 400 revisions from rev 1de4961-8897c5b (2018-2014).
Move core/benchmark, core/cmd_line, core/memory, core/profiler and core/timeout to core/optional/*
Remove metadata/qt*
PORT: Version 2.0a3 (inline with 3.0a3 @ f80c7d5).
Basic Support for XMP Packets.
tga: improvements to adhere more closely to the spec.
pdf: slightly improved parsing.
rar: fix TypeError on unknown block types.
Add MacRoman win32 codepage.
tiff/exif: support SubIFDs and tiled images.
Add method to export metadata in dictionary.
mpeg_video: don't attempt to parse Stream past length.
mpeg_video: parse ESCR correctly, add SCR value.
Change centralise CustomFragments.
field: don't set parser class if class is None, to enable autodetect.
field: add value/display for CustomFragment.
parser: inline warning to enable tracebacks in debug mode.
Fix empty bytestrings in makePrintable.
Fix contentSize in jpeg.py to account for image_data blocks.
Fix the ELF parser.
Enhance the AR archive parser.
elf parser: fix wrong fields order in parsing little endian section flags.
elf parser: add s390 as a machine type.
Flesh out mp4 parser.
PORT: Version 2.0a1 (inline with 3.0a1).
Major refactoring and PEP8.
Fix ResourceWarning warnings on files.
Add a close() method and support for the context manager protocol ("with obj: ...") to parsers, input and output streams.
metadata: get comment from ZIP.
Support for InputIOStream.read(0).
Fix sizeGe when size is None.
Remove unused new_seekable_field_set file.
Remove parser Mapsforge .map.
Remove parser Parallel Realities Starfighter .pak files.
sevenzip: fix for newer archives.
java: update access flags and modifiers for Java 1.7 and update description text for most recent Java.
Support ustar prefix field in tar archives.
Remove file_system* parsers.
Remove misc parsers 3d0, 3ds, gnome_keyring, msoffice*, mstask, ole*, word*.
Remove program parsers macho, nds, prc.
Support non-8bit Character subclasses.
Python parser supports Python 3.7.
Enhance mpeg_ts parser to support MTS/M2TS.
Support for creation date in tiff.
Change don't hardcode errno constant.
PORT: 1.9.1
Internal Only: The following are legacy references to upstream commit messages. Relevant changes up to b0a115f8.
Use integer division.
Replace HACHOIR_ERRORS with Exception.
Fix metadata.Data: make it sortable.
Import fixes from e7de492.
PORT: Version 2.0a1 (inline with 3.0a1 @ e9f8fad).
Replace hachoir.core.field with hachoir.field
Replace hachoir.core.stream with hachoir.stream
Remove the compatibility module for PY1.5 to PY2.5.
metadata: support TIFF picture.
metadata: fix string normalization.
metadata: fix datetime regex
Fix hachoir bug #57.
FileFromInputStream: fix comparison between None and an int.
InputIOStream: open the file in binary mode.
212 lines
6.6 KiB
Python

from hachoir.core.tools import humanDurationNanosec
from hachoir.core.i18n import _
from math import floor
from time import time


class BenchmarkError(Exception):
    """
    Error during a benchmark; use str(err) to format it as a string.
    """

    def __init__(self, message):
        Exception.__init__(self, "Benchmark internal error: %s" % message)


class BenchmarkStat:
    """
    Benchmark statistics. This class automatically computes the minimum
    value, the maximum value and the sum of all values.

    Methods:
    - append(value): append a value
    - getMin(): minimum value
    - getMax(): maximum value
    - getSum(): sum of all values
    - __len__(): get the number of elements
    - __nonzero__(): is it non-empty?
    """

    def __init__(self):
        self._values = []

    def append(self, value):
        self._values.append(value)
        try:
            self._min = min(self._min, value)
            self._max = max(self._max, value)
            self._sum += value
        except AttributeError:
            # First value: initialise the running min/max/sum
            self._min = value
            self._max = value
            self._sum = value

    def __len__(self):
        return len(self._values)

    def __nonzero__(self):
        return bool(self._values)

    def getMin(self):
        return self._min

    def getMax(self):
        return self._max

    def getSum(self):
        return self._sum
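

# Illustrative use of BenchmarkStat (comments only; the timing values are
# made up for the example):
#     stat = BenchmarkStat()
#     for duration in (0.5, 0.2, 0.9):
#         stat.append(duration)
#     stat.getMin(), stat.getMax(), stat.getSum()  # -> 0.2, 0.9, 1.6
#     len(stat)  # -> 3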


class Benchmark:
    def __init__(self, max_time=5.0,
                 min_count=5, max_count=None, progress_time=1.0):
        """
        Constructor:
        - max_time: maximum wanted duration of the whole benchmark
          (default: 5 seconds, minimum: 1 second).
        - min_count: minimum number of function calls to get good statistics
          (default: 5, minimum: 1).
        - progress_time: time between each "progress" message
          (default: 1 second, minimum: 250 ms).
        - max_count: maximum number of function calls (default: no limit).
        - verbose: is it verbose? (default: False)
        - disable_gc: disable the garbage collector? (default: False)
        """
        self.max_time = max(max_time, 1.0)
        self.min_count = max(min_count, 1)
        self.max_count = max_count
        self.progress_time = max(progress_time, 0.25)
        self.verbose = False
        self.disable_gc = False
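        # Note the clamping above: e.g. Benchmark(max_time=0.5, min_count=0)
        # ends up with max_time=1.0 and min_count=1 (illustrative values).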

    def formatTime(self, value):
        """
        Format a time delta as a string, using humanDurationNanosec().
        """
        return humanDurationNanosec(value * 1000000000)

    def displayStat(self, stat):
        """
        Display statistics on stdout:
        - best time (minimum)
        - average time (arithmetic average)
        - worst time (maximum)
        - total time (sum)

        Use the arithmetic average instead of the geometric average because
        the geometric average fails if any value is zero (it returns zero)
        and because floating point multiplication loses precision over many
        values.
        """
        average = stat.getSum() / len(stat)
        values = (stat.getMin(), average, stat.getMax(), stat.getSum())
        values = tuple(self.formatTime(value) for value in values)
        print _("Benchmark: best=%s average=%s worst=%s total=%s") % values

    def _runOnce(self, func, args, kw):
        # Time a single call using wall clock time
        before = time()
        func(*args, **kw)
        after = time()
        return after - before

    def _run(self, func, args, kw):
        """
        Call func(*args, **kw) as many times as needed to get
        good statistics. Algorithm:
        - call the function once
        - compute the needed number of calls
        - then call the function N times

        To compute the number of calls, the parameters are:
        - the duration of the first function call
        - the minimum number of calls (min_count attribute)
        - the maximum test time (max_time attribute)

        Notice: the function only approximates the number of calls.
        """
        # First call of the benchmark
        stat = BenchmarkStat()
        diff = self._runOnce(func, args, kw)
        best = diff
        stat.append(diff)
        total_time = diff

        # Compute the needed number of calls
        count = int(floor(self.max_time / diff))
        count = max(count, self.min_count)
        if self.max_count:
            count = min(count, self.max_count)

        # No other call needed? Just exit
        if count == 1:
            return stat
        estimate = diff * count
        if self.verbose:
            print _("Run benchmark: %s calls (estimate: %s)") \
                % (count, self.formatTime(estimate))

        display_progress = self.verbose and (1.0 <= estimate)
        total_count = 1
        while total_count < count:
            # Run the benchmark and display each result
            if display_progress:
                print _("Result %s/%s: %s (best: %s)") % \
                    (total_count, count,
                     self.formatTime(diff), self.formatTime(best))
            part = count - total_count

            # Would the remaining calls take longer than progress_time?
            # If so, only run enough calls to fill one progress interval
            average = total_time / total_count
            if self.progress_time < part * average:
                part = max(int(self.progress_time / average), 1)
            for index in xrange(part):
                diff = self._runOnce(func, args, kw)
                stat.append(diff)
                total_time += diff
                best = min(diff, best)
            total_count += part
        if display_progress:
            print _("Result %s/%s: %s (best: %s)") % \
                (count, count,
                 self.formatTime(diff), self.formatTime(best))
        return stat

    def validateStat(self, stat):
        """
        Check the statistics and raise a BenchmarkError if they are invalid.
        Examples of tests: reject empty statistics, reject statistics
        containing only null values.
        """
        if not stat:
            raise BenchmarkError("empty statistics")
        if not stat.getSum():
            raise BenchmarkError("nul statistics")

    def run(self, func, *args, **kw):
        """
        Run the function func(*args, **kw), validate the statistics,
        and display the result on stdout.

        Disable the garbage collector if asked to.
        """

        # Disable the garbage collector if asked to and if it exists
        # (Jython 2.2 doesn't have it, for example)
        if self.disable_gc:
            try:
                import gc
            except ImportError:
                self.disable_gc = False
        if self.disable_gc:
            gc_enabled = gc.isenabled()
            gc.disable()
        else:
            gc_enabled = False

        # Run the benchmark
        stat = self._run(func, args, kw)
        if gc_enabled:
            gc.enable()

        # Validate and display the stats
        self.validateStat(stat)
        self.displayStat(stat)
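

# Minimal usage sketch (illustrative only; the timed function and its
# workload below are made-up examples, not part of this module):
if __name__ == "__main__":
    def concat_numbers():
        # Some arbitrary work to measure
        "".join(str(number) for number in xrange(10000))

    bench = Benchmark(max_time=2.0)
    bench.verbose = True
    bench.run(concat_numbers)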