Mirror of https://github.com/SickGear/SickGear.git (synced 2024-12-18 08:43:37 +00:00)

Merge branch 'feature/UpdateChardet' into dev
Commit eacfd57a85: 49 changed files with 9067 additions and 5845 deletions
CHANGES.md
@@ -6,6 +6,7 @@
 * Remove lockfile no longer used by cachecontrol
 * Update Msgpack 1.0.0 (fa7d744) to 1.0.4 (b5acfd5)
 * Update certifi 2022.09.24 to 2022.12.07
+* Update chardet packages 4.0.0 (b3d867a) to 5.1.0 (8087f00)
 * Update diskcache 5.1.0 (40ce0de) to 5.4.0 (1cb1425)
 * Update feedparser 6.0.1 (98d189fa) to 6.0.10 (5fcb3ae)
 * Update humanize 3.5.0 (b6b0ea5) to 4.0.0 (a1514eb)
lib/chardet/__init__.py
@@ -15,68 +15,101 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
+from typing import List, Union
+
+from .charsetgroupprober import CharSetGroupProber
+from .charsetprober import CharSetProber
-from .universaldetector import UniversalDetector
 from .enums import InputState
-from .version import __version__, VERSION
+from .resultdict import ResultDict
+from .universaldetector import UniversalDetector
+from .version import VERSION, __version__
 
-__all__ = ['UniversalDetector', 'detect', 'detect_all', '__version__', 'VERSION']
+__all__ = ["UniversalDetector", "detect", "detect_all", "__version__", "VERSION"]
 
 
-def detect(byte_str):
+def detect(
+    byte_str: Union[bytes, bytearray], should_rename_legacy: bool = False
+) -> ResultDict:
     """
     Detect the encoding of the given byte string.
 
     :param byte_str: The byte sequence to examine.
     :type byte_str: ``bytes`` or ``bytearray``
+    :param should_rename_legacy: Should we rename legacy encodings
+                                 to their more modern equivalents?
+    :type should_rename_legacy: ``bool``
     """
     if not isinstance(byte_str, bytearray):
         if not isinstance(byte_str, bytes):
-            raise TypeError('Expected object of type bytes or bytearray, got: '
-                            '{0}'.format(type(byte_str)))
-        else:
+            raise TypeError(
+                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
+            )
         byte_str = bytearray(byte_str)
-    detector = UniversalDetector()
+    detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
     detector.feed(byte_str)
     return detector.close()
 
 
-def detect_all(byte_str):
+def detect_all(
+    byte_str: Union[bytes, bytearray],
+    ignore_threshold: bool = False,
+    should_rename_legacy: bool = False,
+) -> List[ResultDict]:
     """
     Detect all the possible encodings of the given byte string.
 
     :param byte_str: The byte sequence to examine.
     :type byte_str: ``bytes`` or ``bytearray``
     :param ignore_threshold: Include encodings that are below
                              ``UniversalDetector.MINIMUM_THRESHOLD``
                              in results.
     :type ignore_threshold: ``bool``
+    :param should_rename_legacy: Should we rename legacy encodings
+                                 to their more modern equivalents?
+    :type should_rename_legacy: ``bool``
     """
     if not isinstance(byte_str, bytearray):
         if not isinstance(byte_str, bytes):
-            raise TypeError('Expected object of type bytes or bytearray, got: '
-                            '{0}'.format(type(byte_str)))
-        else:
+            raise TypeError(
+                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
+            )
         byte_str = bytearray(byte_str)
 
-    detector = UniversalDetector()
+    detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
     detector.feed(byte_str)
     detector.close()
 
-    if detector._input_state == InputState.HIGH_BYTE:
-        results = []
-        for prober in detector._charset_probers:
-            if prober.get_confidence() > detector.MINIMUM_THRESHOLD:
-                charset_name = prober.charset_name
-                lower_charset_name = prober.charset_name.lower()
+    if detector.input_state == InputState.HIGH_BYTE:
+        results: List[ResultDict] = []
+        probers: List[CharSetProber] = []
+        for prober in detector.charset_probers:
+            if isinstance(prober, CharSetGroupProber):
+                probers.extend(p for p in prober.probers)
+            else:
+                probers.append(prober)
+        for prober in probers:
+            if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
+                charset_name = prober.charset_name or ""
+                lower_charset_name = charset_name.lower()
                 # Use Windows encoding name instead of ISO-8859 if we saw any
                 # extra Windows-specific bytes
-                if lower_charset_name.startswith('iso-8859'):
-                    if detector._has_win_bytes:
-                        charset_name = detector.ISO_WIN_MAP.get(lower_charset_name,
-                                                                charset_name)
-                results.append({
-                    'encoding': charset_name,
-                    'confidence': prober.get_confidence()
-                })
+                if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
+                    charset_name = detector.ISO_WIN_MAP.get(
+                        lower_charset_name, charset_name
+                    )
+                # Rename legacy encodings with superset encodings if asked
+                if should_rename_legacy:
+                    charset_name = detector.LEGACY_MAP.get(
+                        charset_name.lower(), charset_name
+                    )
+                results.append(
+                    {
+                        "encoding": charset_name,
+                        "confidence": prober.get_confidence(),
+                        "language": prober.language,
+                    }
+                )
         if len(results) > 0:
-            return sorted(results, key=lambda result: -result['confidence'])
+            return sorted(results, key=lambda result: -result["confidence"])
 
     return [detector.result]
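A quick usage sketch (not part of this commit) of the new detect/detect_all signatures shown above; the sample bytes are illustrative only:

import chardet

data = "Ελληνικά".encode("iso-8859-7")

# should_rename_legacy maps legacy names to modern supersets via LEGACY_MAP
result = chardet.detect(data, should_rename_legacy=True)
print(result["encoding"], result["confidence"])

# detect_all keeps every candidate when ignore_threshold=True, sorted by
# descending confidence; each result now also carries a "language" key
for candidate in chardet.detect_all(data, ignore_threshold=True):
    print(candidate["encoding"], candidate["confidence"], candidate["language"])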
lib/chardet/big5freq.py
@@ -44,7 +44,7 @@ BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
 
 # Char to FreqOrder table
 BIG5_TABLE_SIZE = 5376
 
+# fmt: off
 BIG5_CHAR_TO_FREQ_ORDER = (
    1,1801,1506, 255,1431, 198,   9,  82,   6,5008, 177, 202,3681,1256,2821, 110,  # 16
 3814,  33,3274, 261,  76,  44,2114,  16,2946,2187,1176, 659,3971,  26,3451,2653,  # 32
@@ -383,4 +383,4 @@ BIG5_CHAR_TO_FREQ_ORDER = (
  890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941,  # 5360
 2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798,  # 5376
 )
-
+# fmt: on
lib/chardet/big5prober.py
@@ -25,23 +25,23 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
 from .chardistribution import Big5DistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import BIG5_SM_MODEL
 
 
 class Big5Prober(MultiByteCharSetProber):
-    def __init__(self):
-        super(Big5Prober, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
         self.distribution_analyzer = Big5DistributionAnalysis()
         self.reset()
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "Big5"
 
     @property
-    def language(self):
+    def language(self) -> str:
         return "Chinese"
lib/chardet/chardistribution.py
@@ -25,40 +25,58 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
-from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
-                        EUCTW_TYPICAL_DISTRIBUTION_RATIO)
-from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
-                        EUCKR_TYPICAL_DISTRIBUTION_RATIO)
-from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
-                         GB2312_TYPICAL_DISTRIBUTION_RATIO)
-from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
-                       BIG5_TYPICAL_DISTRIBUTION_RATIO)
-from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
-                      JIS_TYPICAL_DISTRIBUTION_RATIO)
+from typing import Tuple, Union
+
+from .big5freq import (
+    BIG5_CHAR_TO_FREQ_ORDER,
+    BIG5_TABLE_SIZE,
+    BIG5_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .euckrfreq import (
+    EUCKR_CHAR_TO_FREQ_ORDER,
+    EUCKR_TABLE_SIZE,
+    EUCKR_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .euctwfreq import (
+    EUCTW_CHAR_TO_FREQ_ORDER,
+    EUCTW_TABLE_SIZE,
+    EUCTW_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .gb2312freq import (
+    GB2312_CHAR_TO_FREQ_ORDER,
+    GB2312_TABLE_SIZE,
+    GB2312_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .jisfreq import (
+    JIS_CHAR_TO_FREQ_ORDER,
+    JIS_TABLE_SIZE,
+    JIS_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .johabfreq import JOHAB_TO_EUCKR_ORDER_TABLE
 
 
-class CharDistributionAnalysis(object):
+class CharDistributionAnalysis:
     ENOUGH_DATA_THRESHOLD = 1024
     SURE_YES = 0.99
     SURE_NO = 0.01
     MINIMUM_DATA_THRESHOLD = 3
 
-    def __init__(self):
+    def __init__(self) -> None:
         # Mapping table to get frequency order from char order (get from
         # GetOrder())
-        self._char_to_freq_order = None
-        self._table_size = None  # Size of above table
+        self._char_to_freq_order: Tuple[int, ...] = tuple()
+        self._table_size = 0  # Size of above table
         # This is a constant value which varies from language to language,
         # used in calculating confidence. See
         # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
         # for further detail.
-        self.typical_distribution_ratio = None
-        self._done = None
-        self._total_chars = None
-        self._freq_chars = None
+        self.typical_distribution_ratio = 0.0
+        self._done = False
+        self._total_chars = 0
+        self._freq_chars = 0
         self.reset()
 
-    def reset(self):
+    def reset(self) -> None:
         """reset analyser, clear any state"""
         # If this flag is set to True, detection is done and conclusion has
         # been made
@@ -67,7 +85,7 @@ class CharDistributionAnalysis(object):
         # The number of characters whose frequency order is less than 512
         self._freq_chars = 0
 
-    def feed(self, char, char_len):
+    def feed(self, char: Union[bytes, bytearray], char_len: int) -> None:
         """feed a character with known length"""
         if char_len == 2:
             # we only care about 2-bytes character in our distribution analysis
@@ -81,7 +99,7 @@ class CharDistributionAnalysis(object):
         if 512 > self._char_to_freq_order[order]:
             self._freq_chars += 1
 
-    def get_confidence(self):
+    def get_confidence(self) -> float:
         """return confidence based on existing data"""
         # if we didn't receive any character in our consideration range,
         # return negative answer
@@ -89,20 +107,21 @@ class CharDistributionAnalysis(object):
             return self.SURE_NO
 
         if self._total_chars != self._freq_chars:
-            r = (self._freq_chars / ((self._total_chars - self._freq_chars)
-                 * self.typical_distribution_ratio))
+            r = self._freq_chars / (
+                (self._total_chars - self._freq_chars) * self.typical_distribution_ratio
+            )
             if r < self.SURE_YES:
                 return r
 
         # normalize confidence (we don't want to be 100% sure)
         return self.SURE_YES
 
-    def got_enough_data(self):
+    def got_enough_data(self) -> bool:
         # It is not necessary to receive all data to draw conclusion.
         # For charset detection, certain amount of data is enough
         return self._total_chars > self.ENOUGH_DATA_THRESHOLD
 
-    def get_order(self, byte_str):
+    def get_order(self, _: Union[bytes, bytearray]) -> int:
         # We do not handle characters based on the original encoding string,
         # but convert this encoding string to a number, here called order.
         # This allows multiple encodings of a language to share one frequency
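A worked sketch (not from the diff) of the confidence ratio computed in get_confidence above, using EUC-KR's typical distribution ratio of 6.0 from euckrfreq.py; the character counts are made up:

SURE_YES = 0.99
typical_distribution_ratio = 6.0  # EUCKR_TYPICAL_DISTRIBUTION_RATIO

# Suppose 1000 two-byte characters were fed and 900 of them fall inside
# the 512 most frequent orders for the language.
total_chars, freq_chars = 1000, 900
r = freq_chars / ((total_chars - freq_chars) * typical_distribution_ratio)
print(r)                 # 1.5, above SURE_YES...
print(min(r, SURE_YES))  # ...so the prober reports the 0.99 cap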
@@ -111,13 +130,13 @@ class CharDistributionAnalysis(object):
 
 
 class EUCTWDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(EUCTWDistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
         self._table_size = EUCTW_TABLE_SIZE
         self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
 
-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for euc-TW encoding, we are interested
         #   first  byte range: 0xc4 -- 0xfe
         #   second byte range: 0xa1 -- 0xfe
@@ -125,18 +144,17 @@ class EUCTWDistributionAnalysis(CharDistributionAnalysis):
         first_char = byte_str[0]
         if first_char >= 0xC4:
             return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
-        else:
-            return -1
+        return -1
 
 
 class EUCKRDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(EUCKRDistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
         self._table_size = EUCKR_TABLE_SIZE
         self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
 
-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for euc-KR encoding, we are interested
         #   first  byte range: 0xb0 -- 0xfe
         #   second byte range: 0xa1 -- 0xfe
@@ -144,18 +162,32 @@ class EUCKRDistributionAnalysis(CharDistributionAnalysis):
         first_char = byte_str[0]
         if first_char >= 0xB0:
             return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
-        else:
-            return -1
+        return -1
+
+
+class JOHABDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self) -> None:
+        super().__init__()
+        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
+        self._table_size = EUCKR_TABLE_SIZE
+        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+        first_char = byte_str[0]
+        if 0x88 <= first_char < 0xD4:
+            code = first_char * 256 + byte_str[1]
+            return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1)
+        return -1
 
 
 class GB2312DistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(GB2312DistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
         self._table_size = GB2312_TABLE_SIZE
         self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
 
-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for GB2312 encoding, we are interested
         #   first  byte range: 0xb0 -- 0xfe
         #   second byte range: 0xa1 -- 0xfe
@@ -163,18 +195,17 @@ class GB2312DistributionAnalysis(CharDistributionAnalysis):
         first_char, second_char = byte_str[0], byte_str[1]
         if (first_char >= 0xB0) and (second_char >= 0xA1):
             return 94 * (first_char - 0xB0) + second_char - 0xA1
-        else:
-            return -1
+        return -1
 
 
 class Big5DistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(Big5DistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
         self._table_size = BIG5_TABLE_SIZE
         self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
 
-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for big5 encoding, we are interested
         #   first  byte range: 0xa4 -- 0xfe
         #   second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
@@ -183,28 +214,26 @@ class Big5DistributionAnalysis(CharDistributionAnalysis):
         if first_char >= 0xA4:
             if second_char >= 0xA1:
                 return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
-            else:
-                return 157 * (first_char - 0xA4) + second_char - 0x40
-        else:
-            return -1
+            return 157 * (first_char - 0xA4) + second_char - 0x40
+        return -1
 
 
 class SJISDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(SJISDistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
         self._table_size = JIS_TABLE_SIZE
         self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
 
-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for sjis encoding, we are interested
         #   first  byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
         #   second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
         # no validation needed here. State machine has done that
         first_char, second_char = byte_str[0], byte_str[1]
-        if (first_char >= 0x81) and (first_char <= 0x9F):
+        if 0x81 <= first_char <= 0x9F:
             order = 188 * (first_char - 0x81)
-        elif (first_char >= 0xE0) and (first_char <= 0xEF):
+        elif 0xE0 <= first_char <= 0xEF:
             order = 188 * (first_char - 0xE0 + 31)
         else:
             return -1
@@ -215,19 +244,18 @@ class SJISDistributionAnalysis(CharDistributionAnalysis):
 
 
 class EUCJPDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(EUCJPDistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
         self._table_size = JIS_TABLE_SIZE
         self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
 
-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for euc-JP encoding, we are interested
         #   first  byte range: 0xa0 -- 0xfe
         #   second byte range: 0xa1 -- 0xfe
         # no validation needed here. State machine has done that
         char = byte_str[0]
         if char >= 0xA0:
-            return 94 * (char - 0xA1) + byte_str[1] - 0xa1
-        else:
-            return -1
+            return 94 * (char - 0xA1) + byte_str[1] - 0xA1
+        return -1
lib/chardet/charsetgroupprober.py
@@ -25,29 +25,30 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
-from .enums import ProbingState
+from typing import List, Optional, Union
+
 from .charsetprober import CharSetProber
+from .enums import LanguageFilter, ProbingState
 
 
 class CharSetGroupProber(CharSetProber):
-    def __init__(self, lang_filter=None):
-        super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
+    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+        super().__init__(lang_filter=lang_filter)
         self._active_num = 0
-        self.probers = []
-        self._best_guess_prober = None
+        self.probers: List[CharSetProber] = []
+        self._best_guess_prober: Optional[CharSetProber] = None
 
-    def reset(self):
-        super(CharSetGroupProber, self).reset()
+    def reset(self) -> None:
+        super().reset()
         self._active_num = 0
         for prober in self.probers:
-            if prober:
-                prober.reset()
-                prober.active = True
-                self._active_num += 1
+            prober.reset()
+            prober.active = True
+            self._active_num += 1
         self._best_guess_prober = None
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> Optional[str]:
         if not self._best_guess_prober:
             self.get_confidence()
             if not self._best_guess_prober:
@@ -55,17 +56,15 @@ class CharSetGroupProber(CharSetProber):
         return self._best_guess_prober.charset_name
 
     @property
-    def language(self):
+    def language(self) -> Optional[str]:
         if not self._best_guess_prober:
             self.get_confidence()
             if not self._best_guess_prober:
                 return None
         return self._best_guess_prober.language
 
-    def feed(self, byte_str):
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
         for prober in self.probers:
-            if not prober:
-                continue
             if not prober.active:
                 continue
             state = prober.feed(byte_str)
@@ -73,8 +72,9 @@ class CharSetGroupProber(CharSetProber):
                 continue
             if state == ProbingState.FOUND_IT:
                 self._best_guess_prober = prober
+                self._state = ProbingState.FOUND_IT
                 return self.state
-            elif state == ProbingState.NOT_ME:
+            if state == ProbingState.NOT_ME:
                 prober.active = False
                 self._active_num -= 1
                 if self._active_num <= 0:
@@ -82,22 +82,22 @@ class CharSetGroupProber(CharSetProber):
                     return self.state
         return self.state
 
-    def get_confidence(self):
+    def get_confidence(self) -> float:
        state = self.state
        if state == ProbingState.FOUND_IT:
            return 0.99
-        elif state == ProbingState.NOT_ME:
+        if state == ProbingState.NOT_ME:
            return 0.01
        best_conf = 0.0
        self._best_guess_prober = None
        for prober in self.probers:
-            if not prober:
-                continue
            if not prober.active:
-                self.logger.debug('%s not active', prober.charset_name)
+                self.logger.debug("%s not active", prober.charset_name)
                continue
            conf = prober.get_confidence()
-            self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
+            self.logger.debug(
+                "%s %s confidence = %s", prober.charset_name, prober.language, conf
+            )
            if best_conf < conf:
                best_conf = conf
                self._best_guess_prober = prober
lib/chardet/charsetprober.py
@@ -28,54 +28,62 @@
 
 import logging
 import re
+from typing import Optional, Union
 
-from .enums import ProbingState
+from .enums import LanguageFilter, ProbingState
+
+INTERNATIONAL_WORDS_PATTERN = re.compile(
+    b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?"
+)
 
 
-class CharSetProber(object):
+class CharSetProber:
 
     SHORTCUT_THRESHOLD = 0.95
 
-    def __init__(self, lang_filter=None):
-        self._state = None
+    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+        self._state = ProbingState.DETECTING
         self.active = True
         self.lang_filter = lang_filter
         self.logger = logging.getLogger(__name__)
 
-    def reset(self):
+    def reset(self) -> None:
         self._state = ProbingState.DETECTING
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> Optional[str]:
         return None
 
-    def feed(self, buf):
-        pass
+    @property
+    def language(self) -> Optional[str]:
+        raise NotImplementedError
+
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+        raise NotImplementedError
 
     @property
-    def state(self):
+    def state(self) -> ProbingState:
         return self._state
 
-    def get_confidence(self):
+    def get_confidence(self) -> float:
         return 0.0
 
     @staticmethod
-    def filter_high_byte_only(buf):
-        buf = re.sub(b'([\x00-\x7F])+', b' ', buf)
+    def filter_high_byte_only(buf: Union[bytes, bytearray]) -> bytes:
+        buf = re.sub(b"([\x00-\x7F])+", b" ", buf)
         return buf
 
     @staticmethod
-    def filter_international_words(buf):
+    def filter_international_words(buf: Union[bytes, bytearray]) -> bytearray:
         """
         We define three types of bytes:
         alphabet: english alphabets [a-zA-Z]
         international: international characters [\x80-\xFF]
         marker: everything else [^a-zA-Z\x80-\xFF]
 
         The input buffer can be thought to contain a series of words delimited
         by markers. This function works to filter all words that contain at
         least one international character. All contiguous sequences of markers
         are replaced by a single space ascii character.
 
         This filter applies to all scripts which do not use English characters.
         """
         filtered = bytearray()
@@ -83,8 +91,7 @@ class CharSetProber(object):
         # This regex expression filters out only words that have at-least one
         # international character. The word may include one marker character at
         # the end.
-        words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
-                           buf)
+        words = INTERNATIONAL_WORDS_PATTERN.findall(buf)
 
         for word in words:
             filtered.extend(word[:-1])
@@ -94,20 +101,17 @@ class CharSetProber(object):
             # similarly across all languages and may thus have similar
             # frequencies).
             last_char = word[-1:]
-            if not last_char.isalpha() and last_char < b'\x80':
-                last_char = b' '
+            if not last_char.isalpha() and last_char < b"\x80":
+                last_char = b" "
             filtered.extend(last_char)
 
         return filtered
 
     @staticmethod
-    def filter_with_english_letters(buf):
+    def remove_xml_tags(buf: Union[bytes, bytearray]) -> bytes:
         """
         Returns a copy of ``buf`` that retains only the sequences of English
         alphabet and high byte characters that are not between <> characters.
         Also retains English alphabet and high byte characters immediately
         before occurrences of >.
 
         This filter can be applied to all scripts which contain both English
         characters and extended ASCII characters, but is currently only used by
         ``Latin1Prober``.
@@ -115,26 +119,24 @@ class CharSetProber(object):
         filtered = bytearray()
         in_tag = False
         prev = 0
+        buf = memoryview(buf).cast("c")
 
-        for curr in range(len(buf)):
-            # Slice here to get bytes instead of an int with Python 3
-            buf_char = buf[curr:curr + 1]
-            # Check if we're coming out of or entering an HTML tag
-            if buf_char == b'>':
+        for curr, buf_char in enumerate(buf):
+            # Check if we're coming out of or entering an XML tag
+
+            # https://github.com/python/typeshed/issues/8182
+            if buf_char == b">":  # type: ignore[comparison-overlap]
                 prev = curr + 1
                 in_tag = False
-            elif buf_char == b'<':
-                in_tag = True
-
-            # If current character is not extended-ASCII and not alphabetic...
-            if buf_char < b'\x80' and not buf_char.isalpha():
-                # ...and we're not in a tag
+            # https://github.com/python/typeshed/issues/8182
+            elif buf_char == b"<":  # type: ignore[comparison-overlap]
                 if curr > prev and not in_tag:
                     # Keep everything after last non-extended-ASCII,
                     # non-alphabetic character
                     filtered.extend(buf[prev:curr])
                     # Output a space to delimit stretch we kept
-                    filtered.extend(b' ')
-                prev = curr + 1
+                    filtered.extend(b" ")
+                in_tag = True
 
         # If we're not in a tag...
         if not in_tag:
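A small sketch (not part of the diff) of what the two static filters above produce; the byte string is illustrative:

from chardet.charsetprober import CharSetProber

raw = b"<b>caf\xe9</b> plain ascii"

# Words with at least one high byte survive; trailing markers become spaces.
print(CharSetProber.filter_international_words(raw))  # bytearray(b'caf\xe9 ')

# remove_xml_tags (the renamed filter_with_english_letters) drops <...> tags
# and keeps the stretches between them, delimited by spaces.
print(CharSetProber.remove_xml_tags(raw))  # bytearray(b'caf\xe9  plain ascii')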
lib/chardet/cli/__init__.py
@@ -1 +0,0 @@
lib/chardet/cli/chardetect.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 """
 Script which takes one or more file paths and reports on their detected
 encodings
@@ -13,17 +12,21 @@ If no paths are provided, it takes its input from stdin.
 
 """
 
-from __future__ import absolute_import, print_function, unicode_literals
-
 import argparse
 import sys
+from typing import Iterable, List, Optional
 
-from chardet import __version__
-from chardet.compat import PY2
-from chardet.universaldetector import UniversalDetector
+from .. import __version__
+from ..universaldetector import UniversalDetector
 
 
-def description_of(lines, name='stdin'):
+def description_of(
+    lines: Iterable[bytes],
+    name: str = "stdin",
+    minimal: bool = False,
+    should_rename_legacy: bool = False,
+) -> Optional[str]:
     """
     Return a string describing the probable encoding of a file or
     list of strings.
@@ -32,8 +35,11 @@ def description_of(lines, name='stdin'):
     :type lines: Iterable of bytes
     :param name: Name of file or collection of lines
     :type name: str
+    :param should_rename_legacy: Should we rename legacy encodings to
+                                 their more modern equivalents?
+    :type should_rename_legacy: ``bool``
     """
-    u = UniversalDetector()
+    u = UniversalDetector(should_rename_legacy=should_rename_legacy)
     for line in lines:
         line = bytearray(line)
         u.feed(line)
@@ -42,16 +48,14 @@ def description_of(lines, name='stdin'):
             break
     u.close()
     result = u.result
-    if PY2:
-        name = name.decode(sys.getfilesystemencoding(), 'ignore')
-    if result['encoding']:
-        return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
-                                                     result['confidence'])
-    else:
-        return '{0}: no result'.format(name)
+    if minimal:
+        return result["encoding"]
+    if result["encoding"]:
+        return f'{name}: {result["encoding"]} with confidence {result["confidence"]}'
+    return f"{name}: no result"
 
 
-def main(argv=None):
+def main(argv: Optional[List[str]] = None) -> None:
     """
     Handles command line arguments and gets things started.
 
@@ -61,25 +65,48 @@ def main(argv=None):
     """
     # Get command line arguments
     parser = argparse.ArgumentParser(
-        description="Takes one or more file paths and reports their detected \
-                     encodings")
-    parser.add_argument('input',
-                        help='File whose encoding we would like to determine. \
-                              (default: stdin)',
-                        type=argparse.FileType('rb'), nargs='*',
-                        default=[sys.stdin if PY2 else sys.stdin.buffer])
-    parser.add_argument('--version', action='version',
-                        version='%(prog)s {0}'.format(__version__))
+        description=(
+            "Takes one or more file paths and reports their detected encodings"
+        )
+    )
+    parser.add_argument(
+        "input",
+        help="File whose encoding we would like to determine. (default: stdin)",
+        type=argparse.FileType("rb"),
+        nargs="*",
+        default=[sys.stdin.buffer],
+    )
+    parser.add_argument(
+        "--minimal",
+        help="Print only the encoding to standard output",
+        action="store_true",
+    )
+    parser.add_argument(
+        "-l",
+        "--legacy",
+        help="Rename legacy encodings to more modern ones.",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--version", action="version", version=f"%(prog)s {__version__}"
+    )
     args = parser.parse_args(argv)
 
     for f in args.input:
         if f.isatty():
-            print("You are running chardetect interactively. Press " +
-                  "CTRL-D twice at the start of a blank line to signal the " +
-                  "end of your input. If you want help, run chardetect " +
-                  "--help\n", file=sys.stderr)
-        print(description_of(f, f.name))
+            print(
+                "You are running chardetect interactively. Press "
+                "CTRL-D twice at the start of a blank line to signal the "
+                "end of your input. If you want help, run chardetect "
+                "--help\n",
+                file=sys.stderr,
+            )
+        print(
+            description_of(
+                f, f.name, minimal=args.minimal, should_rename_legacy=args.legacy
            )
        )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
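A sketch (not in the commit) of driving the updated CLI from Python with the new --minimal and -l/--legacy flags; the file name is illustrative:

from chardet.cli.chardetect import main

# Equivalent to running: chardetect --minimal --legacy some_download.txt
main(["some_download.txt", "--minimal", "--legacy"])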
lib/chardet/codingstatemachine.py
@@ -27,10 +27,11 @@
 
 import logging
 
+from .codingstatemachinedict import CodingStateMachineDict
 from .enums import MachineState
 
 
-class CodingStateMachine(object):
+class CodingStateMachine:
     """
     A state machine to verify a byte sequence for a particular encoding. For
     each byte the detector receives, it will feed that byte to every active
@@ -52,37 +53,38 @@ class CodingStateMachine(object):
     negative answer for this encoding. Detector will exclude this
     encoding from consideration from here on.
     """
-    def __init__(self, sm):
+
+    def __init__(self, sm: CodingStateMachineDict) -> None:
         self._model = sm
         self._curr_byte_pos = 0
         self._curr_char_len = 0
-        self._curr_state = None
+        self._curr_state = MachineState.START
         self.active = True
         self.logger = logging.getLogger(__name__)
         self.reset()
 
-    def reset(self):
+    def reset(self) -> None:
         self._curr_state = MachineState.START
 
-    def next_state(self, c):
+    def next_state(self, c: int) -> int:
         # for each byte we get its class
         # if it is first byte, we also get byte length
-        byte_class = self._model['class_table'][c]
+        byte_class = self._model["class_table"][c]
         if self._curr_state == MachineState.START:
             self._curr_byte_pos = 0
-            self._curr_char_len = self._model['char_len_table'][byte_class]
+            self._curr_char_len = self._model["char_len_table"][byte_class]
         # from byte's class and state_table, we get its next state
-        curr_state = (self._curr_state * self._model['class_factor']
-                      + byte_class)
-        self._curr_state = self._model['state_table'][curr_state]
+        curr_state = self._curr_state * self._model["class_factor"] + byte_class
+        self._curr_state = self._model["state_table"][curr_state]
         self._curr_byte_pos += 1
         return self._curr_state
 
-    def get_current_charlen(self):
+    def get_current_charlen(self) -> int:
         return self._curr_char_len
 
-    def get_coding_state_machine(self):
-        return self._model['name']
+    def get_coding_state_machine(self) -> str:
+        return self._model["name"]
 
     @property
-    def language(self):
-        return self._model['language']
+    def language(self) -> str:
+        return self._model["language"]
lib/chardet/codingstatemachinedict.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+from typing import TYPE_CHECKING, Tuple
+
+if TYPE_CHECKING:
+    # TypedDict was introduced in Python 3.8.
+    #
+    # TODO: Remove the else block and TYPE_CHECKING check when dropping support
+    # for Python 3.7.
+    from typing import TypedDict
+
+    class CodingStateMachineDict(TypedDict, total=False):
+        class_table: Tuple[int, ...]
+        class_factor: int
+        state_table: Tuple[int, ...]
+        char_len_table: Tuple[int, ...]
+        name: str
+        language: str  # Optional key
+
+else:
+    CodingStateMachineDict = dict
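A sketch (not in the diff) of how these typed model dicts are consumed by the CodingStateMachine shown earlier in this commit; the sample bytes are illustrative:

from chardet.codingstatemachine import CodingStateMachine
from chardet.enums import MachineState
from chardet.escsm import HZ_SM_MODEL  # a real CodingStateMachineDict

sm = CodingStateMachine(HZ_SM_MODEL)
for byte in b"~{<:Ky2;S{#,~}":  # bytes styled after HZ-GB-2312
    if sm.next_state(byte) == MachineState.ERROR:
        print("sequence ruled out for", sm.get_coding_state_machine())
        break
else:
    print("still plausible:", sm.get_coding_state_machine())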
lib/chardet/compat.py (deleted)
@@ -1,36 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-# Contributor(s):
-#   Dan Blanchard
-#   Ian Cordasco
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301 USA
-######################### END LICENSE BLOCK #########################
-
-import sys
-
-
-if sys.version_info < (3, 0):
-    PY2 = True
-    PY3 = False
-    string_types = (str, unicode)
-    text_type = unicode
-    iteritems = dict.iteritems
-else:
-    PY2 = False
-    PY3 = True
-    string_types = (bytes, str)
-    text_type = str
-    iteritems = dict.items
lib/chardet/cp949prober.py
@@ -32,8 +32,8 @@ from .mbcssm import CP949_SM_MODEL
 
 
 class CP949Prober(MultiByteCharSetProber):
-    def __init__(self):
-        super(CP949Prober, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
         # NOTE: CP949 is a superset of EUC-KR, so the distribution should be
         #       not different.
@@ -41,9 +41,9 @@ class CP949Prober(MultiByteCharSetProber):
         self.reset()
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "CP949"
 
     @property
-    def language(self):
+    def language(self) -> str:
         return "Korean"
lib/chardet/enums.py
@@ -4,21 +4,26 @@ All of the Enums that are used throughout the chardet package.
 :author: Dan Blanchard (dan.blanchard@gmail.com)
 """
 
+from enum import Enum, Flag
 
-class InputState(object):
+
+class InputState:
     """
     This enum represents the different states a universal detector can be in.
     """
+
     PURE_ASCII = 0
     ESC_ASCII = 1
     HIGH_BYTE = 2
 
 
-class LanguageFilter(object):
+class LanguageFilter(Flag):
     """
     This enum represents the different language filters we can apply to a
     ``UniversalDetector``.
     """
+
     NONE = 0x00
     CHINESE_SIMPLIFIED = 0x01
     CHINESE_TRADITIONAL = 0x02
     JAPANESE = 0x04
@@ -29,46 +34,50 @@ class LanguageFilter(object):
     CJK = CHINESE | JAPANESE | KOREAN
 
 
-class ProbingState(object):
+class ProbingState(Enum):
     """
     This enum represents the different states a prober can be in.
     """
+
     DETECTING = 0
     FOUND_IT = 1
     NOT_ME = 2
 
 
-class MachineState(object):
+class MachineState:
     """
     This enum represents the different states a state machine can be in.
     """
+
     START = 0
     ERROR = 1
     ITS_ME = 2
 
 
-class SequenceLikelihood(object):
+class SequenceLikelihood:
     """
     This enum represents the likelihood of a character following the previous one.
     """
+
     NEGATIVE = 0
     UNLIKELY = 1
     LIKELY = 2
     POSITIVE = 3
 
     @classmethod
-    def get_num_categories(cls):
+    def get_num_categories(cls) -> int:
         """:returns: The number of likelihood categories in the enum."""
         return 4
 
 
-class CharacterCategory(object):
+class CharacterCategory:
     """
     This enum represents the different categories language models for
     ``SingleByteCharsetProber`` put characters into.
 
     Anything less than CONTROL is considered a letter.
     """
+
     UNDEFINED = 255
     LINE_BREAK = 254
     SYMBOL = 253
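A short sketch (not in the diff) of what moving LanguageFilter onto enum.Flag buys, namely first-class bitwise composition:

from chardet.enums import LanguageFilter

cjk = LanguageFilter.CHINESE | LanguageFilter.JAPANESE | LanguageFilter.KOREAN
assert cjk == LanguageFilter.CJK

# Probers test membership with `&`, as EscCharSetProber does below
if cjk & LanguageFilter.JAPANESE:
    print("Japanese state machines stay active")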
lib/chardet/escprober.py
@@ -25,11 +25,17 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
+from typing import Optional, Union
+
 from .charsetprober import CharSetProber
 from .codingstatemachine import CodingStateMachine
-from .enums import LanguageFilter, ProbingState, MachineState
-from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
-                    ISO2022KR_SM_MODEL)
+from .enums import LanguageFilter, MachineState, ProbingState
+from .escsm import (
+    HZ_SM_MODEL,
+    ISO2022CN_SM_MODEL,
+    ISO2022JP_SM_MODEL,
+    ISO2022KR_SM_MODEL,
+)
 
 
 class EscCharSetProber(CharSetProber):
@@ -39,8 +45,8 @@ class EscCharSetProber(CharSetProber):
     identify these encodings.
     """
 
-    def __init__(self, lang_filter=None):
-        super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
+    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+        super().__init__(lang_filter=lang_filter)
         self.coding_sm = []
         if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
             self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
@@ -49,17 +55,15 @@ class EscCharSetProber(CharSetProber):
             self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
         if self.lang_filter & LanguageFilter.KOREAN:
             self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
-        self.active_sm_count = None
-        self._detected_charset = None
-        self._detected_language = None
-        self._state = None
+        self.active_sm_count = 0
+        self._detected_charset: Optional[str] = None
+        self._detected_language: Optional[str] = None
+        self._state = ProbingState.DETECTING
         self.reset()
 
-    def reset(self):
-        super(EscCharSetProber, self).reset()
+    def reset(self) -> None:
+        super().reset()
         for coding_sm in self.coding_sm:
-            if not coding_sm:
-                continue
             coding_sm.active = True
             coding_sm.reset()
         self.active_sm_count = len(self.coding_sm)
@@ -67,23 +71,20 @@ class EscCharSetProber(CharSetProber):
         self._detected_language = None
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> Optional[str]:
         return self._detected_charset
 
     @property
-    def language(self):
+    def language(self) -> Optional[str]:
         return self._detected_language
 
-    def get_confidence(self):
-        if self._detected_charset:
-            return 0.99
-        else:
-            return 0.00
+    def get_confidence(self) -> float:
+        return 0.99 if self._detected_charset else 0.00
 
-    def feed(self, byte_str):
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
         for c in byte_str:
             for coding_sm in self.coding_sm:
-                if not coding_sm or not coding_sm.active:
+                if not coding_sm.active:
                     continue
                 coding_state = coding_sm.next_state(c)
                 if coding_state == MachineState.ERROR:
lib/chardet/escsm.py
@@ -25,8 +25,10 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
+from .codingstatemachinedict import CodingStateMachineDict
 from .enums import MachineState
 
+# fmt: off
 HZ_CLS = (
     1, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
     0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
@@ -70,16 +72,20 @@
     4, MachineState.ERROR, 4, 4, 4, MachineState.ERROR, 4, MachineState.ERROR,  # 20-27
     4, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START,  # 28-2f
 )
+# fmt: on
 
 HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
 
-HZ_SM_MODEL = {'class_table': HZ_CLS,
-               'class_factor': 6,
-               'state_table': HZ_ST,
-               'char_len_table': HZ_CHAR_LEN_TABLE,
-               'name': "HZ-GB-2312",
-               'language': 'Chinese'}
+HZ_SM_MODEL: CodingStateMachineDict = {
+    "class_table": HZ_CLS,
+    "class_factor": 6,
+    "state_table": HZ_ST,
+    "char_len_table": HZ_CHAR_LEN_TABLE,
+    "name": "HZ-GB-2312",
+    "language": "Chinese",
+}
 
+# fmt: off
 ISO2022CN_CLS = (
     2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
     0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
@@ -125,16 +131,20 @@
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,  # 30-37
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START,  # 38-3f
 )
+# fmt: on
 
 ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
 
-ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
-                      'class_factor': 9,
-                      'state_table': ISO2022CN_ST,
-                      'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
-                      'name': "ISO-2022-CN",
-                      'language': 'Chinese'}
+ISO2022CN_SM_MODEL: CodingStateMachineDict = {
+    "class_table": ISO2022CN_CLS,
+    "class_factor": 9,
+    "state_table": ISO2022CN_ST,
+    "char_len_table": ISO2022CN_CHAR_LEN_TABLE,
+    "name": "ISO-2022-CN",
+    "language": "Chinese",
+}
 
+# fmt: off
 ISO2022JP_CLS = (
     2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
     0, 0, 0, 0, 0, 0, 2, 2,  # 08 - 0f
@@ -181,16 +191,20 @@
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,  # 38-3f
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START, MachineState.START,  # 40-47
 )
+# fmt: on
 
 ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
 
-ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
-                      'class_factor': 10,
-                      'state_table': ISO2022JP_ST,
-                      'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
-                      'name': "ISO-2022-JP",
-                      'language': 'Japanese'}
+ISO2022JP_SM_MODEL: CodingStateMachineDict = {
+    "class_table": ISO2022JP_CLS,
+    "class_factor": 10,
+    "state_table": ISO2022JP_ST,
+    "char_len_table": ISO2022JP_CHAR_LEN_TABLE,
+    "name": "ISO-2022-JP",
+    "language": "Japanese",
+}
 
+# fmt: off
 ISO2022KR_CLS = (
     2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
     0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
@@ -233,14 +247,15 @@
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 5, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,  # 18-1f
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START,  # 20-27
 )
+# fmt: on
 
 ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
 
-ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
-                      'class_factor': 6,
-                      'state_table': ISO2022KR_ST,
-                      'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
-                      'name': "ISO-2022-KR",
-                      'language': 'Korean'}
+ISO2022KR_SM_MODEL: CodingStateMachineDict = {
+    "class_table": ISO2022KR_CLS,
+    "class_factor": 6,
+    "state_table": ISO2022KR_ST,
+    "char_len_table": ISO2022KR_CHAR_LEN_TABLE,
+    "name": "ISO-2022-KR",
+    "language": "Korean",
+}
lib/chardet/eucjpprober.py
@@ -25,68 +25,78 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
-from .enums import ProbingState, MachineState
-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
+from typing import Union
+
 from .chardistribution import EUCJPDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .enums import MachineState, ProbingState
 from .jpcntx import EUCJPContextAnalysis
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import EUCJP_SM_MODEL
 
 
 class EUCJPProber(MultiByteCharSetProber):
-    def __init__(self):
-        super(EUCJPProber, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
         self.distribution_analyzer = EUCJPDistributionAnalysis()
         self.context_analyzer = EUCJPContextAnalysis()
         self.reset()
 
-    def reset(self):
-        super(EUCJPProber, self).reset()
+    def reset(self) -> None:
+        super().reset()
         self.context_analyzer.reset()
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "EUC-JP"
 
     @property
-    def language(self):
+    def language(self) -> str:
         return "Japanese"
 
-    def feed(self, byte_str):
-        for i in range(len(byte_str)):
-            # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
-            coding_state = self.coding_sm.next_state(byte_str[i])
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+        assert self.coding_sm is not None
+        assert self.distribution_analyzer is not None
+
+        for i, byte in enumerate(byte_str):
+            # PY3K: byte_str is a byte array, so byte is an int, not a byte
+            coding_state = self.coding_sm.next_state(byte)
             if coding_state == MachineState.ERROR:
-                self.logger.debug('%s %s prober hit error at byte %s',
-                                  self.charset_name, self.language, i)
+                self.logger.debug(
+                    "%s %s prober hit error at byte %s",
+                    self.charset_name,
+                    self.language,
+                    i,
+                )
                 self._state = ProbingState.NOT_ME
                 break
-            elif coding_state == MachineState.ITS_ME:
+            if coding_state == MachineState.ITS_ME:
                 self._state = ProbingState.FOUND_IT
                 break
-            elif coding_state == MachineState.START:
+            if coding_state == MachineState.START:
                 char_len = self.coding_sm.get_current_charlen()
                 if i == 0:
-                    self._last_char[1] = byte_str[0]
+                    self._last_char[1] = byte
                     self.context_analyzer.feed(self._last_char, char_len)
                     self.distribution_analyzer.feed(self._last_char, char_len)
                 else:
-                    self.context_analyzer.feed(byte_str[i - 1:i + 1],
-                                               char_len)
-                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
-                                                    char_len)
+                    self.context_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
 
         self._last_char[0] = byte_str[-1]
 
         if self.state == ProbingState.DETECTING:
-            if (self.context_analyzer.got_enough_data() and
-                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+            if self.context_analyzer.got_enough_data() and (
+                self.get_confidence() > self.SHORTCUT_THRESHOLD
+            ):
                 self._state = ProbingState.FOUND_IT
 
         return self.state
 
-    def get_confidence(self):
+    def get_confidence(self) -> float:
+        assert self.distribution_analyzer is not None
+
         context_conf = self.context_analyzer.get_confidence()
         distrib_conf = self.distribution_analyzer.get_confidence()
         return max(context_conf, distrib_conf)
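A sketch (not in the commit) of exercising this prober directly; UniversalDetector normally drives it, and the sample text is illustrative:

from chardet.eucjpprober import EUCJPProber

prober = EUCJPProber()
prober.feed("こんにちは、世界".encode("euc_jp"))
print(prober.charset_name, prober.get_confidence())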
lib/chardet/euckrfreq.py
@@ -43,6 +43,7 @@ EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
 EUCKR_TABLE_SIZE = 2352
 
 # Char to FreqOrder table ,
+# fmt: off
 EUCKR_CHAR_TO_FREQ_ORDER = (
   13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722,  87,
 1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
@@ -192,4 +193,4 @@ EUCKR_CHAR_TO_FREQ_ORDER = (
 2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
  670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642,  # 512, 256
 )
-
+# fmt: on
lib/chardet/euckrprober.py
@@ -25,23 +25,23 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
 from .chardistribution import EUCKRDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import EUCKR_SM_MODEL
 
 
 class EUCKRProber(MultiByteCharSetProber):
-    def __init__(self):
-        super(EUCKRProber, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
         self.distribution_analyzer = EUCKRDistributionAnalysis()
         self.reset()
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "EUC-KR"
 
     @property
-    def language(self):
+    def language(self) -> str:
         return "Korean"
lib/chardet/euctwfreq.py
@@ -43,9 +43,10 @@
 EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
 
-# Char to FreqOrder table ,
+# Char to FreqOrder table
 EUCTW_TABLE_SIZE = 5376
 
+# fmt: off
 EUCTW_CHAR_TO_FREQ_ORDER = (
     1, 1800, 1506, 255, 1431, 198, 9, 82, 6, 7310, 177, 202, 3615, 1256, 2808, 110,  # 2742
 3735, 33, 3241, 261, 76, 44, 2113, 16, 2931, 2184, 1176, 659, 3868, 26, 3404, 2643,  # 2758
@@ -384,4 +385,4 @@ EUCTW_CHAR_TO_FREQ_ORDER = (
  890, 3614, 3864, 8110, 1877, 3732, 3402, 8111, 2183, 2353, 3403, 1652, 8112, 8113, 8114, 941,  # 8086
 2294, 208, 3499, 4057, 2019, 330, 4294, 3865, 2892, 2492, 3733, 4295, 8115, 8116, 8117, 8118,  # 8102
 )
-
+# fmt: on
|
|||
# 02110-1301 USA
|
||||
######################### END LICENSE BLOCK #########################
|
||||
|
||||
from .mbcharsetprober import MultiByteCharSetProber
|
||||
from .codingstatemachine import CodingStateMachine
|
||||
from .chardistribution import EUCTWDistributionAnalysis
|
||||
from .codingstatemachine import CodingStateMachine
|
||||
from .mbcharsetprober import MultiByteCharSetProber
|
||||
from .mbcssm import EUCTW_SM_MODEL
|
||||
|
||||
|
||||
class EUCTWProber(MultiByteCharSetProber):
|
||||
def __init__(self):
|
||||
super(EUCTWProber, self).__init__()
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
|
||||
self.distribution_analyzer = EUCTWDistributionAnalysis()
|
||||
self.reset()
|
||||
|
||||
@property
|
||||
def charset_name(self):
|
||||
def charset_name(self) -> str:
|
||||
return "EUC-TW"
|
||||
|
||||
@property
|
||||
def language(self):
|
||||
def language(self) -> str:
|
||||
return "Taiwan"
|
||||
|
|
|
lib/chardet/gb2312freq.py
@@ -43,6 +43,7 @@ GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
 GB2312_TABLE_SIZE = 3760
 
+# fmt: off
 GB2312_CHAR_TO_FREQ_ORDER = (
 1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
 2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
@@ -280,4 +281,4 @@ GB2312_CHAR_TO_FREQ_ORDER = (
  381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
  852,1221,1400,1486, 882,2299,4036, 351,  28,1122, 700,6479,6480,6481,6482,6483,  # last 512
 )
-
+# fmt: on
lib/chardet/gb2312prober.py
@@ -25,22 +25,23 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
 from .chardistribution import GB2312DistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import GB2312_SM_MODEL
 
 
 class GB2312Prober(MultiByteCharSetProber):
-    def __init__(self):
-        super(GB2312Prober, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
         self.distribution_analyzer = GB2312DistributionAnalysis()
         self.reset()
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "GB2312"
 
     @property
-    def language(self):
+    def language(self) -> str:
         return "Chinese"
lib/chardet/hebrewprober.py
@@ -25,8 +25,11 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
+from typing import Optional, Union
+
 from .charsetprober import CharSetProber
 from .enums import ProbingState
+from .sbcharsetprober import SingleByteCharSetProber
 
 # This prober doesn't actually recognize a language or a charset.
 # It is a helper prober for the use of the Hebrew model probers
@@ -125,18 +128,20 @@
 # model probers scores. The answer is returned in the form of the name of the
 # charset identified, either "windows-1255" or "ISO-8859-8".
 
 
 class HebrewProber(CharSetProber):
+    SPACE = 0x20
     # windows-1255 / ISO-8859-8 code points of interest
-    FINAL_KAF = 0xea
-    NORMAL_KAF = 0xeb
-    FINAL_MEM = 0xed
-    NORMAL_MEM = 0xee
-    FINAL_NUN = 0xef
-    NORMAL_NUN = 0xf0
-    FINAL_PE = 0xf3
-    NORMAL_PE = 0xf4
-    FINAL_TSADI = 0xf5
-    NORMAL_TSADI = 0xf6
+    FINAL_KAF = 0xEA
+    NORMAL_KAF = 0xEB
+    FINAL_MEM = 0xED
+    NORMAL_MEM = 0xEE
+    FINAL_NUN = 0xEF
+    NORMAL_NUN = 0xF0
+    FINAL_PE = 0xF3
+    NORMAL_PE = 0xF4
+    FINAL_TSADI = 0xF5
+    NORMAL_TSADI = 0xF6
 
     # Minimum Visual vs Logical final letter score difference.
     # If the difference is below this, don't rely solely on the final letter score
@@ -151,35 +156,44 @@ class HebrewProber(CharSetProber):
     VISUAL_HEBREW_NAME = "ISO-8859-8"
     LOGICAL_HEBREW_NAME = "windows-1255"
 
-    def __init__(self):
-        super(HebrewProber, self).__init__()
-        self._final_char_logical_score = None
-        self._final_char_visual_score = None
-        self._prev = None
-        self._before_prev = None
-        self._logical_prober = None
-        self._visual_prober = None
+    def __init__(self) -> None:
+        super().__init__()
+        self._final_char_logical_score = 0
+        self._final_char_visual_score = 0
+        self._prev = self.SPACE
+        self._before_prev = self.SPACE
+        self._logical_prober: Optional[SingleByteCharSetProber] = None
+        self._visual_prober: Optional[SingleByteCharSetProber] = None
         self.reset()
 
-    def reset(self):
+    def reset(self) -> None:
         self._final_char_logical_score = 0
         self._final_char_visual_score = 0
         # The two last characters seen in the previous buffer,
         # mPrev and mBeforePrev are initialized to space in order to simulate
         # a word delimiter at the beginning of the data
-        self._prev = ' '
-        self._before_prev = ' '
+        self._prev = self.SPACE
+        self._before_prev = self.SPACE
         # These probers are owned by the group prober.
 
-    def set_model_probers(self, logicalProber, visualProber):
-        self._logical_prober = logicalProber
-        self._visual_prober = visualProber
+    def set_model_probers(
+        self,
+        logical_prober: SingleByteCharSetProber,
+        visual_prober: SingleByteCharSetProber,
+    ) -> None:
+        self._logical_prober = logical_prober
+        self._visual_prober = visual_prober
 
-    def is_final(self, c):
-        return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN,
-                     self.FINAL_PE, self.FINAL_TSADI]
+    def is_final(self, c: int) -> bool:
+        return c in [
+            self.FINAL_KAF,
+            self.FINAL_MEM,
+            self.FINAL_NUN,
+            self.FINAL_PE,
+            self.FINAL_TSADI,
+        ]
 
-    def is_non_final(self, c):
+    def is_non_final(self, c: int) -> bool:
         # The normal Tsadi is not a good Non-Final letter due to words like
         # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
         # apostrophe is converted to a space in FilterWithoutEnglishLetters
|
@ -190,10 +204,9 @@ class HebrewProber(CharSetProber):
|
|||
# for example legally end with a Non-Final Pe or Kaf. However, the
|
||||
# benefit of these letters as Non-Final letters outweighs the damage
|
||||
# since these words are quite rare.
|
||||
return c in [self.NORMAL_KAF, self.NORMAL_MEM,
|
||||
self.NORMAL_NUN, self.NORMAL_PE]
|
||||
return c in [self.NORMAL_KAF, self.NORMAL_MEM, self.NORMAL_NUN, self.NORMAL_PE]
|
||||
|
||||
def feed(self, byte_str):
|
||||
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
|
||||
# Final letter analysis for logical-visual decision.
|
||||
# Look for evidence that the received buffer is either logical Hebrew
|
||||
# or visual Hebrew.
|
||||
|
@ -227,9 +240,9 @@ class HebrewProber(CharSetProber):
|
|||
byte_str = self.filter_high_byte_only(byte_str)
|
||||
|
||||
for cur in byte_str:
|
||||
if cur == ' ':
|
||||
if cur == self.SPACE:
|
||||
# We stand on a space - a word just ended
|
||||
if self._before_prev != ' ':
|
||||
if self._before_prev != self.SPACE:
|
||||
# next-to-last char was not a space so self._prev is not a
|
||||
# 1 letter word
|
||||
if self.is_final(self._prev):
|
||||
|
@ -241,8 +254,11 @@ class HebrewProber(CharSetProber):
|
|||
self._final_char_visual_score += 1
|
||||
else:
|
||||
# Not standing on a space
|
||||
if ((self._before_prev == ' ') and
|
||||
(self.is_final(self._prev)) and (cur != ' ')):
|
||||
if (
|
||||
(self._before_prev == self.SPACE)
|
||||
and (self.is_final(self._prev))
|
||||
and (cur != self.SPACE)
|
||||
):
|
||||
# case (3) [-2:space][-1:final letter][cur:not space]
|
||||
self._final_char_visual_score += 1
|
||||
self._before_prev = self._prev
|
||||
|
@ -253,7 +269,10 @@ class HebrewProber(CharSetProber):
|
|||
return ProbingState.DETECTING
|
||||
|
||||
@property
|
||||
def charset_name(self):
|
||||
def charset_name(self) -> str:
|
||||
assert self._logical_prober is not None
|
||||
assert self._visual_prober is not None
|
||||
|
||||
# Make the decision: is it Logical or Visual?
|
||||
# If the final letter score distance is dominant enough, rely on it.
|
||||
finalsub = self._final_char_logical_score - self._final_char_visual_score
|
||||
|
@ -263,8 +282,9 @@ class HebrewProber(CharSetProber):
|
|||
return self.VISUAL_HEBREW_NAME
|
||||
|
||||
# It's not dominant enough, try to rely on the model scores instead.
|
||||
modelsub = (self._logical_prober.get_confidence()
|
||||
- self._visual_prober.get_confidence())
|
||||
modelsub = (
|
||||
self._logical_prober.get_confidence() - self._visual_prober.get_confidence()
|
||||
)
|
||||
if modelsub > self.MIN_MODEL_DISTANCE:
|
||||
return self.LOGICAL_HEBREW_NAME
|
||||
if modelsub < -self.MIN_MODEL_DISTANCE:
|
||||
|
@ -280,13 +300,17 @@ class HebrewProber(CharSetProber):
|
|||
return self.LOGICAL_HEBREW_NAME
|
||||
|
||||
@property
|
||||
def language(self):
|
||||
return 'Hebrew'
|
||||
def language(self) -> str:
|
||||
return "Hebrew"
|
||||
|
||||
@property
|
||||
def state(self):
|
||||
def state(self) -> ProbingState:
|
||||
assert self._logical_prober is not None
|
||||
assert self._visual_prober is not None
|
||||
|
||||
# Remain active as long as any of the model probers are active.
|
||||
if (self._logical_prober.state == ProbingState.NOT_ME) and \
|
||||
(self._visual_prober.state == ProbingState.NOT_ME):
|
||||
if (self._logical_prober.state == ProbingState.NOT_ME) and (
|
||||
self._visual_prober.state == ProbingState.NOT_ME
|
||||
):
|
||||
return ProbingState.NOT_ME
|
||||
return ProbingState.DETECTING
|
||||
|
|
|
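
The scoring above rests on the five Hebrew letters with distinct final forms: in logical order (windows-1255) they close a word, while in visual order (ISO-8859-8, stored right-to-left) they sit where a word's byte sequence begins. A toy re-statement of the two visible scoring cases, outside the prober (a hypothetical helper; it omits the non-final-letter case handled in the elided hunk):

    from typing import Tuple

    SPACE = 0x20
    FINAL_LETTERS = {0xEA, 0xED, 0xEF, 0xF3, 0xF5}  # kaf, mem, nun, pe, tsadi

    def final_letter_scores(buf: bytes) -> Tuple[int, int]:
        """Count words ending (logical) vs. beginning (visual) with a final letter."""
        logical = visual = 0
        prev = before_prev = SPACE  # simulated word delimiter, as in reset()
        for cur in buf:
            if cur == SPACE and before_prev != SPACE and prev in FINAL_LETTERS:
                logical += 1  # [final letter][space]: logical Hebrew word end
            elif cur != SPACE and before_prev == SPACE and prev in FINAL_LETTERS:
                visual += 1  # [space][final letter][not space]: visual Hebrew
            before_prev, prev = prev, cur
        return logical, visual
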
@@ -46,6 +46,7 @@ JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368

# fmt: off
JIS_CHAR_TO_FREQ_ORDER = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510,  # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661,  # 32

@@ -321,5 +322,4 @@ JIS_CHAR_TO_FREQ_ORDER = (
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767,  # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137,  # 4368 #last 512
)


# fmt: on
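
These char-to-frequency-order tables feed chardet's CharDistributionAnalysis: characters whose rank falls inside the most frequent slice of the table count as "common", and confidence grows with the share of common characters, scaled by the language's typical distribution ratio (3.0 for JIS above, 0.9 for GB2312 earlier). A rough sketch of that calculation, simplified from chardet/chardistribution.py, which this diff does not show:

    def distribution_confidence(freq_chars: int, total_chars: int,
                                typical_ratio: float) -> float:
        # share of frequently-used characters, weighted by the language's
        # typical common-to-rare ratio; capped just below certainty
        if total_chars <= 0 or freq_chars <= 0:
            return 0.0
        if total_chars == freq_chars:
            return 0.99
        r = freq_chars / ((total_chars - freq_chars) * typical_ratio)
        return min(r, 0.99)
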
2382 lib/chardet/johabfreq.py Normal file
File diff suppressed because it is too large

47 lib/chardet/johabprober.py Normal file

@@ -0,0 +1,47 @@
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .chardistribution import JOHABDistributionAnalysis
from .codingstatemachine import CodingStateMachine
from .mbcharsetprober import MultiByteCharSetProber
from .mbcssm import JOHAB_SM_MODEL


class JOHABProber(MultiByteCharSetProber):
    def __init__(self) -> None:
        super().__init__()
        self.coding_sm = CodingStateMachine(JOHAB_SM_MODEL)
        self.distribution_analyzer = JOHABDistributionAnalysis()
        self.reset()

    @property
    def charset_name(self) -> str:
        return "Johab"

    @property
    def language(self) -> str:
        return "Korean"
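
With this prober registered in MBCSGroupProber (further below), Korean text in the legacy Johab encoding becomes detectable end to end. An illustrative check, assuming a Python build whose codec set includes "johab"; very short inputs may detect differently:

    import chardet

    data = "안녕하세요, 조합형 한글입니다.".encode("johab")  # assumed sample
    print(chardet.detect(data))  # expected to report Johab for inputs like this
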
@@ -25,9 +25,11 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from typing import List, Tuple, Union

# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
# fmt: off
jp2_char_context = (
    (0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1),
    (2, 4, 0, 4, 0, 3, 0, 4, 0, 3, 4, 4, 4, 2, 4, 3, 3, 4, 3, 2, 3, 3, 4, 2, 3, 3, 3, 2, 4, 1, 4, 3, 3, 1, 5, 4, 3, 4, 3, 4, 3, 5, 3, 0, 3, 5, 4, 2, 0, 3, 1, 0, 3, 3, 0, 3, 3, 0, 1, 1, 0, 4, 3, 0, 3, 3, 0, 4, 0, 2, 0, 3, 5, 5, 5, 5, 4, 0, 4, 1, 0, 3, 4),
    (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2),

@@ -112,23 +114,25 @@ jp2CharContext = (
    (0, 4, 0, 4, 0, 4, 0, 3, 0, 4, 4, 3, 4, 2, 4, 3, 2, 0, 4, 4, 4, 3, 5, 3, 5, 3, 3, 2, 4, 2, 4, 3, 4, 3, 1, 4, 0, 2, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4, 3, 4, 1, 3, 4, 3, 2, 1, 2, 1, 3, 3, 3, 4, 4, 3, 3, 5, 0, 4, 0, 3, 0, 4, 3, 3, 3, 2, 1, 0, 3, 0, 0, 3, 3),
    (0, 4, 0, 3, 0, 3, 0, 3, 0, 3, 5, 5, 3, 3, 3, 3, 4, 3, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3, 4, 3, 5, 3, 3, 1, 3, 2, 4, 5, 5, 5, 5, 4, 3, 4, 5, 5, 3, 2, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 4, 3, 2, 2, 1, 2, 0, 3, 0, 0, 4, 1),
)
# fmt: on


class JapaneseContextAnalysis(object):

class JapaneseContextAnalysis:
    NUM_OF_CATEGORY = 6
    DONT_KNOW = -1
    ENOUGH_REL_THRESHOLD = 100
    MAX_REL_THRESHOLD = 1000
    MINIMUM_DATA_THRESHOLD = 4

    def __init__(self):
        self._total_rel = None
        self._rel_sample = None
        self._need_to_skip_char_num = None
        self._last_char_order = None
        self._done = None
    def __init__(self) -> None:
        self._total_rel = 0
        self._rel_sample: List[int] = []
        self._need_to_skip_char_num = 0
        self._last_char_order = -1
        self._done = False
        self.reset()

    def reset(self):
    def reset(self) -> None:
        self._total_rel = 0  # total sequence received
        # category counters, each integer counts sequence in its category
        self._rel_sample = [0] * self.NUM_OF_CATEGORY

@@ -140,7 +144,7 @@ class JapaneseContextAnalysis(object):
        # been made
        self._done = False

    def feed(self, byte_str, num_bytes):
    def feed(self, byte_str: Union[bytes, bytearray], num_bytes: int) -> None:
        if self._done:
            return

@@ -164,32 +168,34 @@ class JapaneseContextAnalysis(object):
                if self._total_rel > self.MAX_REL_THRESHOLD:
                    self._done = True
                    break
                self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
                self._rel_sample[
                    jp2_char_context[self._last_char_order][order]
                ] += 1
            self._last_char_order = order

    def got_enough_data(self):
    def got_enough_data(self) -> bool:
        return self._total_rel > self.ENOUGH_REL_THRESHOLD

    def get_confidence(self):
    def get_confidence(self) -> float:
        # This is just one way to calculate confidence. It works well for me.
        if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
            return (self._total_rel - self._rel_sample[0]) / self._total_rel
        else:
            return self.DONT_KNOW

    def get_order(self, byte_str):
    def get_order(self, _: Union[bytes, bytearray]) -> Tuple[int, int]:
        return -1, 1


class SJISContextAnalysis(JapaneseContextAnalysis):
    def __init__(self):
        super(SJISContextAnalysis, self).__init__()
    def __init__(self) -> None:
        super().__init__()
        self._charset_name = "SHIFT_JIS"

    @property
    def charset_name(self):
    def charset_name(self) -> str:
        return self._charset_name

    def get_order(self, byte_str):
    def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]:
        if not byte_str:
            return -1, 1
        # find out current char's byte length

@@ -209,8 +215,9 @@ class SJISContextAnalysis(JapaneseContextAnalysis):

        return -1, char_len


class EUCJPContextAnalysis(JapaneseContextAnalysis):
    def get_order(self, byte_str):
    def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]:
        if not byte_str:
            return -1, 1
        # find out current char's byte length

@@ -229,5 +236,3 @@ class EUCJPContextAnalysis(JapaneseContextAnalysis):
            return second_char - 0xA1, char_len

        return -1, char_len
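
The rewritten get_confidence() is plain arithmetic: category 0 collects 2-gram pairs the table marks as non-occurring, so confidence is the share of observed hiragana pairs that are plausible Japanese. A worked example with assumed counters:

    rel_sample = [3, 10, 25, 40, 15, 7]  # assumed per-category pair counts
    total_rel = sum(rel_sample)          # 100 pairs observed
    confidence = (total_rel - rel_sample[0]) / total_rel
    print(confidence)                    # 0.97: 97% of pairs look like real Japanese
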
@@ -1,9 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from chardet.sbcharsetprober import SingleByteCharSetModel


# 3: Positive
# 2: Likely
# 1: Unlikely

@@ -4373,13 +4369,15 @@ ISO_8859_5_BULGARIAN_CHAR_TO_ORDER = {
    255: 253,  # 'џ'
}

ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
                                                    language='Bulgairan',
ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(
    charset_name="ISO-8859-5",
    language="Bulgarian",
    char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER,
    language_model=BULGARIAN_LANG_MODEL,
    typical_positive_ratio=0.969392,
    keep_ascii_letters=False,
                                                    alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя')
    alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
)

WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER = {
    0: 255,  # '\x00'

@@ -4640,11 +4638,12 @@ WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER = {
    255: 16,  # 'я'
}

WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
                                                      language='Bulgarian',
WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(
    charset_name="windows-1251",
    language="Bulgarian",
    char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER,
    language_model=BULGARIAN_LANG_MODEL,
    typical_positive_ratio=0.969392,
    keep_ascii_letters=False,
                                                      alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя')

    alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
)
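
Each model above is consumed by SingleByteCharSetProber, which maps incoming bytes through char_to_order_map and scores adjacent pairs against language_model. A minimal pairing sketch; the sample string is an assumption for illustration:

    from chardet.langbulgarianmodel import WINDOWS_1251_BULGARIAN_MODEL
    from chardet.sbcharsetprober import SingleByteCharSetProber

    prober = SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL)
    prober.feed("Начална страница".encode("windows-1251"))  # assumed sample
    print(prober.charset_name, prober.language, prober.get_confidence())
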
@@ -1,9 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from chardet.sbcharsetprober import SingleByteCharSetModel


# 3: Positive
# 2: Likely
# 1: Unlikely

@@ -4121,13 +4117,15 @@ WINDOWS_1253_GREEK_CHAR_TO_ORDER = {
    255: 253,  # None
}

WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(charset_name='windows-1253',
                                                  language='Greek',
WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(
    charset_name="windows-1253",
    language="Greek",
    char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER,
    language_model=GREEK_LANG_MODEL,
    typical_positive_ratio=0.982851,
    keep_ascii_letters=False,
                                                  alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ')
    alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ",
)

ISO_8859_7_GREEK_CHAR_TO_ORDER = {
    0: 255,  # '\x00'

@@ -4388,11 +4386,12 @@ ISO_8859_7_GREEK_CHAR_TO_ORDER = {
    255: 253,  # None
}

ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-7',
                                                language='Greek',
ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(
    charset_name="ISO-8859-7",
    language="Greek",
    char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER,
    language_model=GREEK_LANG_MODEL,
    typical_positive_ratio=0.982851,
    keep_ascii_letters=False,
                                                alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ')

    alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ",
)
@@ -1,9 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from chardet.sbcharsetprober import SingleByteCharSetModel


# 3: Positive
# 2: Likely
# 1: Unlikely

@@ -4373,11 +4369,12 @@ WINDOWS_1255_HEBREW_CHAR_TO_ORDER = {
    255: 253,  # None
}

WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(charset_name='windows-1255',
                                                   language='Hebrew',
WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(
    charset_name="windows-1255",
    language="Hebrew",
    char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER,
    language_model=HEBREW_LANG_MODEL,
    typical_positive_ratio=0.984004,
    keep_ascii_letters=False,
                                                   alphabet='אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ')

    alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
)
@@ -1,9 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from chardet.sbcharsetprober import SingleByteCharSetModel


# 3: Positive
# 2: Likely
# 1: Unlikely

@@ -4373,13 +4369,15 @@ WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER = {
    255: 253,  # '˙'
}

WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1250',
                                                      language='Hungarian',
WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(
    charset_name="windows-1250",
    language="Hungarian",
    char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER,
    language_model=HUNGARIAN_LANG_MODEL,
    typical_positive_ratio=0.947368,
    keep_ascii_letters=True,
                                                      alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű')
    alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű",
)

ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER = {
    0: 255,  # '\x00'

@@ -4640,11 +4638,12 @@ ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER = {
    255: 253,  # '˙'
}

ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-2',
                                                    language='Hungarian',
ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(
    charset_name="ISO-8859-2",
    language="Hungarian",
    char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER,
    language_model=HUNGARIAN_LANG_MODEL,
    typical_positive_ratio=0.947368,
    keep_ascii_letters=True,
                                                    alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű')

    alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű",
)
@@ -1,9 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from chardet.sbcharsetprober import SingleByteCharSetModel


# 3: Positive
# 2: Likely
# 1: Unlikely

@@ -4373,13 +4369,15 @@ IBM866_RUSSIAN_CHAR_TO_ORDER = {
    255: 255,  # '\xa0'
}

IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM866',
                                              language='Russian',
IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(
    charset_name="IBM866",
    language="Russian",
    char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
    language_model=RUSSIAN_LANG_MODEL,
    typical_positive_ratio=0.976601,
    keep_ascii_letters=False,
                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
)

WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = {
    0: 255,  # '\x00'

@@ -4640,13 +4638,15 @@ WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = {
    255: 16,  # 'я'
}

WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
                                                    language='Russian',
WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(
    charset_name="windows-1251",
    language="Russian",
    char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
    language_model=RUSSIAN_LANG_MODEL,
    typical_positive_ratio=0.976601,
    keep_ascii_letters=False,
                                                    alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
)

IBM855_RUSSIAN_CHAR_TO_ORDER = {
    0: 255,  # '\x00'

@@ -4907,13 +4907,15 @@ IBM855_RUSSIAN_CHAR_TO_ORDER = {
    255: 255,  # '\xa0'
}

IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM855',
                                              language='Russian',
IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(
    charset_name="IBM855",
    language="Russian",
    char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
    language_model=RUSSIAN_LANG_MODEL,
    typical_positive_ratio=0.976601,
    keep_ascii_letters=False,
                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
)

KOI8_R_RUSSIAN_CHAR_TO_ORDER = {
    0: 255,  # '\x00'

@@ -5174,13 +5176,15 @@ KOI8_R_RUSSIAN_CHAR_TO_ORDER = {
    255: 70,  # 'Ъ'
}

KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='KOI8-R',
                                              language='Russian',
KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(
    charset_name="KOI8-R",
    language="Russian",
    char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
    language_model=RUSSIAN_LANG_MODEL,
    typical_positive_ratio=0.976601,
    keep_ascii_letters=False,
                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
)

MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = {
    0: 255,  # '\x00'

@@ -5441,13 +5445,15 @@ MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = {
    255: 255,  # '€'
}

MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='MacCyrillic',
                                                   language='Russian',
MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(
    charset_name="MacCyrillic",
    language="Russian",
    char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
    language_model=RUSSIAN_LANG_MODEL,
    typical_positive_ratio=0.976601,
    keep_ascii_letters=False,
                                                   alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
)

ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = {
    0: 255,  # '\x00'

@@ -5708,11 +5714,12 @@ ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = {
    255: 255,  # 'џ'
}

ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
                                                  language='Russian',
ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(
    charset_name="ISO-8859-5",
    language="Russian",
    char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
    language_model=RUSSIAN_LANG_MODEL,
    typical_positive_ratio=0.976601,
    keep_ascii_letters=False,
                                                  alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')

    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
)
@@ -1,9 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from chardet.sbcharsetprober import SingleByteCharSetModel


# 3: Positive
# 2: Likely
# 1: Unlikely

@@ -4373,11 +4369,12 @@ TIS_620_THAI_CHAR_TO_ORDER = {
    255: 253,  # None
}

TIS_620_THAI_MODEL = SingleByteCharSetModel(charset_name='TIS-620',
                                            language='Thai',
TIS_620_THAI_MODEL = SingleByteCharSetModel(
    charset_name="TIS-620",
    language="Thai",
    char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER,
    language_model=THAI_LANG_MODEL,
    typical_positive_ratio=0.926386,
    keep_ascii_letters=False,
                                            alphabet='กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛')

    alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
)
@@ -1,9 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from chardet.sbcharsetprober import SingleByteCharSetModel


# 3: Positive
# 2: Likely
# 1: Unlikely

@@ -4373,11 +4369,12 @@ ISO_8859_9_TURKISH_CHAR_TO_ORDER = {
    255: 107,  # 'ÿ'
}

ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-9',
                                                  language='Turkish',
ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(
    charset_name="ISO-8859-9",
    language="Turkish",
    char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER,
    language_model=TURKISH_LANG_MODEL,
    typical_positive_ratio=0.97029,
    keep_ascii_letters=True,
                                                  alphabet='ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş')

    alphabet="ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş",
)
@@ -26,6 +26,8 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from typing import List, Union

from .charsetprober import CharSetProber
from .enums import ProbingState

@@ -41,6 +43,7 @@ ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# fmt: off
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 08 - 0F

@@ -91,34 +94,34 @@ Latin1ClassModel = (
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)
# fmt: on


class Latin1Prober(CharSetProber):
    def __init__(self):
        super(Latin1Prober, self).__init__()
        self._last_char_class = None
        self._freq_counter = None
    def __init__(self) -> None:
        super().__init__()
        self._last_char_class = OTH
        self._freq_counter: List[int] = []
        self.reset()

    def reset(self):
    def reset(self) -> None:
        self._last_char_class = OTH
        self._freq_counter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)
        super().reset()

    @property
    def charset_name(self):
    def charset_name(self) -> str:
        return "ISO-8859-1"

    @property
    def language(self):
    def language(self) -> str:
        return ""

    def feed(self, byte_str):
        byte_str = self.filter_with_english_letters(byte_str)
    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
        byte_str = self.remove_xml_tags(byte_str)
        for c in byte_str:
            char_class = Latin1_CharToClass[c]
            freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM)
                                    + char_class]
            freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + char_class]
            if freq == 0:
                self._state = ProbingState.NOT_ME
                break

@@ -127,19 +130,18 @@ class Latin1Prober(CharSetProber):

        return self.state

    def get_confidence(self):
    def get_confidence(self) -> float:
        if self.state == ProbingState.NOT_ME:
            return 0.01

        total = sum(self._freq_counter)
        if total < 0.01:
            confidence = 0.0
        else:
            confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0)
                          / total)
            if confidence < 0.0:
                confidence = 0.0
        confidence = (
            0.0
            if total < 0.01
            else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
        )
        confidence = max(confidence, 0.0)
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.73
        confidence *= 0.73
        return confidence
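
The refactored confidence calculation keeps the original weighting: "very likely" (class 3) transitions raise the score, "very unlikely" (class 1) transitions are penalized twenty-fold, and the result is damped by 0.73 so more specific probers win ties. A worked example with assumed counters:

    freq_counter = [0, 1, 50, 41]  # assumed counts per frequency category 0..3
    total = sum(freq_counter)      # 92
    confidence = max((freq_counter[3] - freq_counter[1] * 20.0) / total, 0.0)
    print(round(confidence * 0.73, 3))  # (41 - 20) / 92 * 0.73 ~= 0.167
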
162 lib/chardet/macromanprober.py Normal file

@@ -0,0 +1,162 @@
######################## BEGIN LICENSE BLOCK ########################
# This code was modified from latin1prober.py by Rob Speer <rob@lumino.so>.
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Rob Speer - adapt to MacRoman encoding
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from typing import List, Union

from .charsetprober import CharSetProber
from .enums import ProbingState

FREQ_CAT_NUM = 4

UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
ODD = 8  # character that is unlikely to appear
CLASS_NUM = 9  # total classes

# The change from Latin1 is that we explicitly look for extended characters
# that are infrequently-occurring symbols, and consider them to always be
# improbable. This should let MacRoman get out of the way of more likely
# encodings in most situations.

# fmt: off
MacRoman_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,  # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,  # 78 - 7F
    ACV, ACV, ACO, ACV, ACO, ACV, ACV, ASV,  # 80 - 87
    ASV, ASV, ASV, ASV, ASV, ASO, ASV, ASV,  # 88 - 8F
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASV,  # 90 - 97
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,  # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, ASO,  # A0 - A7
    OTH, OTH, ODD, ODD, OTH, OTH, ACV, ACV,  # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, ASV, ASV,  # B8 - BF
    OTH, OTH, ODD, OTH, ODD, OTH, OTH, OTH,  # C0 - C7
    OTH, OTH, OTH, ACV, ACV, ACV, ACV, ASV,  # C8 - CF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, ODD,  # D0 - D7
    ASV, ACV, ODD, OTH, OTH, OTH, OTH, OTH,  # D8 - DF
    OTH, OTH, OTH, OTH, OTH, ACV, ACV, ACV,  # E0 - E7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,  # E8 - EF
    ODD, ACV, ACV, ACV, ACV, ASV, ODD, ODD,  # F0 - F7
    ODD, ODD, ODD, ODD, ODD, ODD, ODD, ODD,  # F8 - FF
)

# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
MacRomanClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO ODD
    0, 0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3, 1,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3, 1,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3, 1,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2, 1,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3, 1,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3, 1,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3, 1,  # ASO
    0, 1, 1, 1, 1, 1, 1, 1, 1,  # ODD
)
# fmt: on


class MacRomanProber(CharSetProber):
    def __init__(self) -> None:
        super().__init__()
        self._last_char_class = OTH
        self._freq_counter: List[int] = []
        self.reset()

    def reset(self) -> None:
        self._last_char_class = OTH
        self._freq_counter = [0] * FREQ_CAT_NUM

        # express the prior that MacRoman is a somewhat rare encoding;
        # this can be done by starting out in a slightly improbable state
        # that must be overcome
        self._freq_counter[2] = 10

        super().reset()

    @property
    def charset_name(self) -> str:
        return "MacRoman"

    @property
    def language(self) -> str:
        return ""

    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
        byte_str = self.remove_xml_tags(byte_str)
        for c in byte_str:
            char_class = MacRoman_CharToClass[c]
            freq = MacRomanClassModel[(self._last_char_class * CLASS_NUM) + char_class]
            if freq == 0:
                self._state = ProbingState.NOT_ME
                break
            self._freq_counter[freq] += 1
            self._last_char_class = char_class

        return self.state

    def get_confidence(self) -> float:
        if self.state == ProbingState.NOT_ME:
            return 0.01

        total = sum(self._freq_counter)
        confidence = (
            0.0
            if total < 0.01
            else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
        )
        confidence = max(confidence, 0.0)
        # lower the confidence of MacRoman so that other more accurate
        # detector can take priority.
        confidence *= 0.73
        return confidence
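
MacRomanProber mirrors Latin1Prober with one extra class (ODD) and a prior: reset() seeds _freq_counter[2] with 10 so MacRoman must accumulate real evidence before it can outbid likelier encodings on short inputs. Driving it directly (illustrative; the stdlib "mac_roman" codec and the sample text are assumptions):

    from chardet.macromanprober import MacRomanProber

    prober = MacRomanProber()
    prober.feed("café déjà vu, señor".encode("mac_roman"))  # assumed sample
    print(prober.charset_name, prober.get_confidence())
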
@@ -27,8 +27,12 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from typing import Optional, Union

from .chardistribution import CharDistributionAnalysis
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
from .codingstatemachine import CodingStateMachine
from .enums import LanguageFilter, MachineState, ProbingState


class MultiByteCharSetProber(CharSetProber):

@@ -36,56 +40,56 @@ class MultiByteCharSetProber(CharSetProber):
    MultiByteCharSetProber
    """

    def __init__(self, lang_filter=None):
        super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
        self.distribution_analyzer = None
        self.coding_sm = None
        self._last_char = [0, 0]
    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
        super().__init__(lang_filter=lang_filter)
        self.distribution_analyzer: Optional[CharDistributionAnalysis] = None
        self.coding_sm: Optional[CodingStateMachine] = None
        self._last_char = bytearray(b"\0\0")

    def reset(self):
        super(MultiByteCharSetProber, self).reset()
    def reset(self) -> None:
        super().reset()
        if self.coding_sm:
            self.coding_sm.reset()
        if self.distribution_analyzer:
            self.distribution_analyzer.reset()
        self._last_char = [0, 0]
        self._last_char = bytearray(b"\0\0")

    @property
    def charset_name(self):
        raise NotImplementedError
    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
        assert self.coding_sm is not None
        assert self.distribution_analyzer is not None

    @property
    def language(self):
        raise NotImplementedError

    def feed(self, byte_str):
        for i in range(len(byte_str)):
            coding_state = self.coding_sm.next_state(byte_str[i])
        for i, byte in enumerate(byte_str):
            coding_state = self.coding_sm.next_state(byte)
            if coding_state == MachineState.ERROR:
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self.logger.debug(
                    "%s %s prober hit error at byte %s",
                    self.charset_name,
                    self.language,
                    i,
                )
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
            if coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
            if coding_state == MachineState.START:
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    self._last_char[1] = byte_str[0]
                    self._last_char[1] = byte
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)
                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)

        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            if (self.distribution_analyzer.got_enough_data() and
                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
            if self.distribution_analyzer.got_enough_data() and (
                self.get_confidence() > self.SHORTCUT_THRESHOLD
            ):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
    def get_confidence(self) -> float:
        assert self.distribution_analyzer is not None
        return self.distribution_analyzer.get_confidence()
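
The feed() loop keeps a two-byte window so a multibyte character split across feed() calls is still scored: _last_char carries the final byte of the previous buffer, and on the first byte of a new buffer the bridging pair is what reaches the distribution analyzer. A standalone trace of that window (a re-statement for illustration, not chardet code; the real loop only feeds pairs at character boundaries signaled by the state machine):

    last_char = bytearray(b"\0\0")

    def feed_chunk(chunk: bytes) -> None:
        for i, byte in enumerate(chunk):
            if i == 0:
                last_char[1] = byte
                pair = bytes(last_char)  # bridges the previous chunk
            else:
                pair = chunk[i - 1 : i + 1]
            print(pair)  # what distribution_analyzer.feed() would see
        last_char[0] = chunk[-1]

    feed_chunk(b"\xb0\xa1")  # assumed lead/trail bytes of one character
    feed_chunk(b"\xb0\xa2")  # first byte pairs with the tail of the last chunk
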
@@ -27,20 +27,22 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .charsetgroupprober import CharSetGroupProber
from .cp949prober import CP949Prober
from .enums import LanguageFilter
from .eucjpprober import EUCJPProber
from .euckrprober import EUCKRProber
from .euctwprober import EUCTWProber
from .gb2312prober import GB2312Prober
from .johabprober import JOHABProber
from .sjisprober import SJISProber
from .utf8prober import UTF8Prober


class MBCSGroupProber(CharSetGroupProber):
    def __init__(self, lang_filter=None):
        super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
        super().__init__(lang_filter=lang_filter)
        self.probers = [
            UTF8Prober(),
            SJISProber(),

@@ -49,6 +51,7 @@ class MBCSGroupProber(CharSetGroupProber):
            EUCKRProber(),
            CP949Prober(),
            Big5Prober(),
            EUCTWProber()
            EUCTWProber(),
            JOHABProber(),
        ]
        self.reset()
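
With JOHABProber appended, the multibyte group now races nine probers and reports the front-runner. A sketch of querying the group directly rather than through detect() (sample bytes assumed):

    from chardet.mbcsgroupprober import MBCSGroupProber

    group = MBCSGroupProber()
    group.feed("조합형 인코딩 검출 시험".encode("johab"))  # assumed Johab sample
    print(group.charset_name, group.get_confidence())
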
@@ -25,10 +25,12 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .codingstatemachinedict import CodingStateMachineDict
from .enums import MachineState

# BIG5

# fmt: off
BIG5_CLS = (
    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07    #allow 0x00 as legal value
    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f

@@ -69,17 +71,20 @@ BIG5_ST = (
    MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
    MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
)
# fmt: on

BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)

BIG5_SM_MODEL = {'class_table': BIG5_CLS,
                 'class_factor': 5,
                 'state_table': BIG5_ST,
                 'char_len_table': BIG5_CHAR_LEN_TABLE,
                 'name': 'Big5'}
BIG5_SM_MODEL: CodingStateMachineDict = {
    "class_table": BIG5_CLS,
    "class_factor": 5,
    "state_table": BIG5_ST,
    "char_len_table": BIG5_CHAR_LEN_TABLE,
    "name": "Big5",
}

# CP949

# fmt: off
CP949_CLS = (
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,  # 00 - 0f
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,  # 10 - 1f

@@ -109,17 +114,20 @@ CP949_ST = (
    MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
    MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
)
# fmt: on

CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)

CP949_SM_MODEL = {'class_table': CP949_CLS,
                  'class_factor': 10,
                  'state_table': CP949_ST,
                  'char_len_table': CP949_CHAR_LEN_TABLE,
                  'name': 'CP949'}
CP949_SM_MODEL: CodingStateMachineDict = {
    "class_table": CP949_CLS,
    "class_factor": 10,
    "state_table": CP949_ST,
    "char_len_table": CP949_CHAR_LEN_TABLE,
    "name": "CP949",
}

# EUC-JP

# fmt: off
EUCJP_CLS = (
    4, 4, 4, 4, 4, 4, 4, 4,  # 00 - 07
    4, 4, 4, 4, 4, 4, 5, 5,  # 08 - 0f

@@ -162,17 +170,20 @@ EUCJP_ST = (
    MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
    3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
)
# fmt: on

EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)

EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
                  'class_factor': 6,
                  'state_table': EUCJP_ST,
                  'char_len_table': EUCJP_CHAR_LEN_TABLE,
                  'name': 'EUC-JP'}
EUCJP_SM_MODEL: CodingStateMachineDict = {
    "class_table": EUCJP_CLS,
    "class_factor": 6,
    "state_table": EUCJP_ST,
    "char_len_table": EUCJP_CHAR_LEN_TABLE,
    "name": "EUC-JP",
}

# EUC-KR

# fmt: off
EUCKR_CLS = (
    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07
    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f

@@ -212,17 +223,77 @@ EUCKR_ST = (
    MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
)
# fmt: on

EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)

EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
                  'class_factor': 4,
                  'state_table': EUCKR_ST,
                  'char_len_table': EUCKR_CHAR_LEN_TABLE,
                  'name': 'EUC-KR'}
EUCKR_SM_MODEL: CodingStateMachineDict = {
    "class_table": EUCKR_CLS,
    "class_factor": 4,
    "state_table": EUCKR_ST,
    "char_len_table": EUCKR_CHAR_LEN_TABLE,
    "name": "EUC-KR",
}

# JOHAB
# fmt: off
JOHAB_CLS = (
    4,4,4,4,4,4,4,4,  # 00 - 07
    4,4,4,4,4,4,0,0,  # 08 - 0f
    4,4,4,4,4,4,4,4,  # 10 - 17
    4,4,4,0,4,4,4,4,  # 18 - 1f
    4,4,4,4,4,4,4,4,  # 20 - 27
    4,4,4,4,4,4,4,4,  # 28 - 2f
    4,3,3,3,3,3,3,3,  # 30 - 37
    3,3,3,3,3,3,3,3,  # 38 - 3f
    3,1,1,1,1,1,1,1,  # 40 - 47
    1,1,1,1,1,1,1,1,  # 48 - 4f
    1,1,1,1,1,1,1,1,  # 50 - 57
    1,1,1,1,1,1,1,1,  # 58 - 5f
    1,1,1,1,1,1,1,1,  # 60 - 67
    1,1,1,1,1,1,1,1,  # 68 - 6f
    1,1,1,1,1,1,1,1,  # 70 - 77
    1,1,1,1,1,1,1,2,  # 78 - 7f
    6,6,6,6,8,8,8,8,  # 80 - 87
    8,8,8,8,8,8,8,8,  # 88 - 8f
    8,7,7,7,7,7,7,7,  # 90 - 97
    7,7,7,7,7,7,7,7,  # 98 - 9f
    7,7,7,7,7,7,7,7,  # a0 - a7
    7,7,7,7,7,7,7,7,  # a8 - af
    7,7,7,7,7,7,7,7,  # b0 - b7
    7,7,7,7,7,7,7,7,  # b8 - bf
    7,7,7,7,7,7,7,7,  # c0 - c7
    7,7,7,7,7,7,7,7,  # c8 - cf
    7,7,7,7,5,5,5,5,  # d0 - d7
    5,9,9,9,9,9,9,5,  # d8 - df
    9,9,9,9,9,9,9,9,  # e0 - e7
    9,9,9,9,9,9,9,9,  # e8 - ef
    9,9,9,9,9,9,9,9,  # f0 - f7
    9,9,5,5,5,5,5,0   # f8 - ff
)

JOHAB_ST = (
    # cls = 0 1 2 3 4 5 6 7 8 9
    MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,3 ,3 ,4 ,  # MachineState.START
    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,  # MachineState.ITS_ME
    MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,  # MachineState.ERROR
    MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,  # 3
    MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,  # 4
)
# fmt: on

JOHAB_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 0, 0, 2, 2, 2)

JOHAB_SM_MODEL: CodingStateMachineDict = {
    "class_table": JOHAB_CLS,
    "class_factor": 10,
    "state_table": JOHAB_ST,
    "char_len_table": JOHAB_CHAR_LEN_TABLE,
    "name": "Johab",
}

# EUC-TW

# fmt: off
EUCTW_CLS = (
    2, 2, 2, 2, 2, 2, 2, 2,  # 00 - 07
    2, 2, 2, 2, 2, 2, 0, 0,  # 08 - 0f

@@ -266,17 +337,20 @@ EUCTW_ST = (
    5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
    MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
# fmt: on

EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)

EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
                  'class_factor': 7,
                  'state_table': EUCTW_ST,
                  'char_len_table': EUCTW_CHAR_LEN_TABLE,
                  'name': 'x-euc-tw'}
EUCTW_SM_MODEL: CodingStateMachineDict = {
    "class_table": EUCTW_CLS,
    "class_factor": 7,
    "state_table": EUCTW_ST,
    "char_len_table": EUCTW_CHAR_LEN_TABLE,
    "name": "x-euc-tw",
}

# GB2312

# fmt: off
GB2312_CLS = (
    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07
    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f

@@ -320,6 +394,7 @@ GB2312_ST = (
    MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
    MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
# fmt: on

# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since

@@ -328,14 +403,16 @@ GB2312_ST = (
# 2 here.
GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)

GB2312_SM_MODEL = {'class_table': GB2312_CLS,
                   'class_factor': 7,
                   'state_table': GB2312_ST,
                   'char_len_table': GB2312_CHAR_LEN_TABLE,
                   'name': 'GB2312'}
GB2312_SM_MODEL: CodingStateMachineDict = {
    "class_table": GB2312_CLS,
    "class_factor": 7,
    "state_table": GB2312_ST,
    "char_len_table": GB2312_CHAR_LEN_TABLE,
    "name": "GB2312",
}

# Shift_JIS

# fmt: off
SJIS_CLS = (
    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07
    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f

@@ -370,25 +447,28 @@ SJIS_CLS = (
    3, 3, 3, 3, 3, 3, 3, 3,  # e0 - e7
    3, 3, 3, 3, 3, 4, 4, 4,  # e8 - ef
    3, 3, 3, 3, 3, 3, 3, 3,  # f0 - f7
    3,3,3,3,3,0,0,0)  # f8 - ff

    3, 3, 3, 3, 3, 0, 0, 0,  # f8 - ff
)

SJIS_ST = (
    MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
)
# fmt: on

SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)

SJIS_SM_MODEL = {'class_table': SJIS_CLS,
                 'class_factor': 6,
                 'state_table': SJIS_ST,
                 'char_len_table': SJIS_CHAR_LEN_TABLE,
                 'name': 'Shift_JIS'}
SJIS_SM_MODEL: CodingStateMachineDict = {
    "class_table": SJIS_CLS,
    "class_factor": 6,
    "state_table": SJIS_ST,
    "char_len_table": SJIS_CHAR_LEN_TABLE,
    "name": "Shift_JIS",
}

# UCS2-BE

# fmt: off
UCS2BE_CLS = (
    0, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
    0, 0, 1, 0, 0, 2, 0, 0,  # 08 - 0f

@@ -433,17 +513,20 @@ UCS2BE_ST = (
    5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
    6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
# fmt: on

UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)

UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
                   'class_factor': 6,
                   'state_table': UCS2BE_ST,
                   'char_len_table': UCS2BE_CHAR_LEN_TABLE,
                   'name': 'UTF-16BE'}
UCS2BE_SM_MODEL: CodingStateMachineDict = {
    "class_table": UCS2BE_CLS,
    "class_factor": 6,
    "state_table": UCS2BE_ST,
    "char_len_table": UCS2BE_CHAR_LEN_TABLE,
    "name": "UTF-16BE",
}

# UCS2-LE

# fmt: off
UCS2LE_CLS = (
    0, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
    0, 0, 1, 0, 0, 2, 0, 0,  # 08 - 0f

@@ -488,17 +571,20 @@ UCS2LE_ST = (
    5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
    5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
# fmt: on

UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)

UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
                   'class_factor': 6,
                   'state_table': UCS2LE_ST,
                   'char_len_table': UCS2LE_CHAR_LEN_TABLE,
                   'name': 'UTF-16LE'}
UCS2LE_SM_MODEL: CodingStateMachineDict = {
    "class_table": UCS2LE_CLS,
    "class_factor": 6,
    "state_table": UCS2LE_ST,
    "char_len_table": UCS2LE_CHAR_LEN_TABLE,
    "name": "UTF-16LE",
}

# UTF-8

# fmt: off
UTF8_CLS = (
    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07  #allow 0x00 as a legal value
    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f

@@ -562,11 +648,14 @@ UTF8_ST = (
    MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
)
# fmt: on

UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)

UTF8_SM_MODEL = {'class_table': UTF8_CLS,
                 'class_factor': 16,
                 'state_table': UTF8_ST,
                 'char_len_table': UTF8_CHAR_LEN_TABLE,
                 'name': 'UTF-8'}
UTF8_SM_MODEL: CodingStateMachineDict = {
    "class_table": UTF8_CLS,
    "class_factor": 16,
    "state_table": UTF8_ST,
    "char_len_table": UTF8_CHAR_LEN_TABLE,
    "name": "UTF-8",
}
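
Every *_SM_MODEL above parameterizes the same generic CodingStateMachine: class_table maps a byte to a character class, class_factor is the row width of state_table, and char_len_table records the expected length of the sequence being assembled. Stepping the UTF-8 machine by hand (illustrative):

    from chardet.codingstatemachine import CodingStateMachine
    from chardet.enums import MachineState
    from chardet.mbcssm import UTF8_SM_MODEL

    sm = CodingStateMachine(UTF8_SM_MODEL)
    for byte in "é".encode("utf-8"):  # 0xC3 0xA9, one two-byte character
        state = sm.next_state(byte)
        print(hex(byte), state, sm.get_current_charlen())
    assert state != MachineState.ERROR  # well-formed UTF-8 never hits ERROR
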
@@ -1,19 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Metadata about languages used by our model training code for our
SingleByteCharSetProbers. Could be used for other things in the future.

This code is based on the language metadata from the uchardet project.
"""
from __future__ import absolute_import, print_function

from string import ascii_letters
from typing import List, Optional

# TODO: Add Ukrainian (KOI8-U)


# TODO: Add Ukranian (KOI8-U)

class Language(object):
class Language:
    """Metadata about a language useful for training models

    :ivar name: The human name for the language, in English.

@@ -33,9 +31,17 @@ class Language(object):
       Wikipedia for training data.
    :type wiki_start_pages: list of str
    """
    def __init__(self, name=None, iso_code=None, use_ascii=True, charsets=None,
                 alphabet=None, wiki_start_pages=None):
        super(Language, self).__init__()

    def __init__(
        self,
        name: Optional[str] = None,
        iso_code: Optional[str] = None,
        use_ascii: bool = True,
        charsets: Optional[List[str]] = None,
        alphabet: Optional[str] = None,
        wiki_start_pages: Optional[List[str]] = None,
    ) -> None:
        super().__init__()
        self.name = name
        self.iso_code = iso_code
        self.use_ascii = use_ascii

@@ -46,246 +52,282 @@ class Language(object):
            else:
                alphabet = ascii_letters
        elif not alphabet:
            raise ValueError('Must supply alphabet if use_ascii is False')
        self.alphabet = ''.join(sorted(set(alphabet))) if alphabet else None
            raise ValueError("Must supply alphabet if use_ascii is False")
        self.alphabet = "".join(sorted(set(alphabet))) if alphabet else None
        self.wiki_start_pages = wiki_start_pages

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                               ', '.join('{}={!r}'.format(k, v)
                                         for k, v in self.__dict__.items()
                                         if not k.startswith('_')))
    def __repr__(self) -> str:
        param_str = ", ".join(
            f"{k}={v!r}" for k, v in self.__dict__.items() if not k.startswith("_")
        )
        return f"{self.__class__.__name__}({param_str})"


LANGUAGES = {'Arabic': Language(name='Arabic',
                                iso_code='ar',
LANGUAGES = {
    "Arabic": Language(
        name="Arabic",
        iso_code="ar",
        use_ascii=False,
        # We only support encodings that use isolated
        # forms, because the current recommendation is
        # that the rendering system handles presentation
        # forms. This means we purposefully skip IBM864.
        charsets=['ISO-8859-6', 'WINDOWS-1256',
                  'CP720', 'CP864'],
        alphabet=u'ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ',
        wiki_start_pages=[u'الصفحة_الرئيسية']),
    'Belarusian': Language(name='Belarusian',
                           iso_code='be',
        charsets=["ISO-8859-6", "WINDOWS-1256", "CP720", "CP864"],
        alphabet="ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ",
        wiki_start_pages=["الصفحة_الرئيسية"],
    ),
    "Belarusian": Language(
        name="Belarusian",
        iso_code="be",
        use_ascii=False,
        charsets=['ISO-8859-5', 'WINDOWS-1251',
                  'IBM866', 'MacCyrillic'],
        alphabet=(u'АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯ'
                  u'абвгдеёжзійклмнопрстуўфхцчшыьэюяʼ'),
        wiki_start_pages=[u'Галоўная_старонка']),
    'Bulgarian': Language(name='Bulgarian',
                          iso_code='bg',
        charsets=["ISO-8859-5", "WINDOWS-1251", "IBM866", "MacCyrillic"],
        alphabet="АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯабвгдеёжзійклмнопрстуўфхцчшыьэюяʼ",
        wiki_start_pages=["Галоўная_старонка"],
    ),
    "Bulgarian": Language(
        name="Bulgarian",
        iso_code="bg",
        use_ascii=False,
        charsets=['ISO-8859-5', 'WINDOWS-1251',
                  'IBM855'],
        alphabet=(u'АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯ'
                  u'абвгдежзийклмнопрстуфхцчшщъьюя'),
        wiki_start_pages=[u'Начална_страница']),
    'Czech': Language(name='Czech',
                      iso_code='cz',
        charsets=["ISO-8859-5", "WINDOWS-1251", "IBM855"],
        alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
        wiki_start_pages=["Начална_страница"],
    ),
    "Czech": Language(
        name="Czech",
        iso_code="cz",
        use_ascii=True,
        charsets=['ISO-8859-2', 'WINDOWS-1250'],
        alphabet=u'áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ',
        wiki_start_pages=[u'Hlavní_strana']),
    'Danish': Language(name='Danish',
                       iso_code='da',
        charsets=["ISO-8859-2", "WINDOWS-1250"],
        alphabet="áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ",
        wiki_start_pages=["Hlavní_strana"],
    ),
    "Danish": Language(
        name="Danish",
        iso_code="da",
        use_ascii=True,
        charsets=['ISO-8859-1', 'ISO-8859-15',
                  'WINDOWS-1252'],
        alphabet=u'æøåÆØÅ',
        wiki_start_pages=[u'Forside']),
    'German': Language(name='German',
                       iso_code='de',
        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
        alphabet="æøåÆØÅ",
        wiki_start_pages=["Forside"],
    ),
    "German": Language(
        name="German",
        iso_code="de",
        use_ascii=True,
        charsets=['ISO-8859-1', 'WINDOWS-1252'],
        alphabet=u'äöüßÄÖÜ',
        wiki_start_pages=[u'Wikipedia:Hauptseite']),
    'Greek': Language(name='Greek',
                      iso_code='el',
        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
        alphabet="äöüßẞÄÖÜ",
        wiki_start_pages=["Wikipedia:Hauptseite"],
    ),
    "Greek": Language(
        name="Greek",
        iso_code="el",
        use_ascii=False,
        charsets=['ISO-8859-7', 'WINDOWS-1253'],
        alphabet=(u'αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώ'
                  u'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ'),
        wiki_start_pages=[u'Πύλη:Κύρια']),
    'English': Language(name='English',
                        iso_code='en',
        charsets=["ISO-8859-7", "WINDOWS-1253"],
        alphabet="αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ",
        wiki_start_pages=["Πύλη:Κύρια"],
    ),
    "English": Language(
        name="English",
        iso_code="en",
        use_ascii=True,
        charsets=['ISO-8859-1', 'WINDOWS-1252'],
        wiki_start_pages=[u'Main_Page']),
    'Esperanto': Language(name='Esperanto',
                          iso_code='eo',
        charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"],
        wiki_start_pages=["Main_Page"],
    ),
    "Esperanto": Language(
        name="Esperanto",
        iso_code="eo",
        # Q, W, X, and Y not used at all
        use_ascii=False,
        charsets=['ISO-8859-3'],
        alphabet=(u'abcĉdefgĝhĥijĵklmnoprsŝtuŭvz'
                  u'ABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ'),
        wiki_start_pages=[u'Vikipedio:Ĉefpaĝo']),
    'Spanish': Language(name='Spanish',
                        iso_code='es',
        charsets=["ISO-8859-3"],
        alphabet="abcĉdefgĝhĥijĵklmnoprsŝtuŭvzABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ",
        wiki_start_pages=["Vikipedio:Ĉefpaĝo"],
    ),
    "Spanish": Language(
        name="Spanish",
        iso_code="es",
        use_ascii=True,
        charsets=['ISO-8859-1', 'ISO-8859-15',
                  'WINDOWS-1252'],
        alphabet=u'ñáéíóúüÑÁÉÍÓÚÜ',
        wiki_start_pages=[u'Wikipedia:Portada']),
    'Estonian': Language(name='Estonian',
                         iso_code='et',
        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
        alphabet="ñáéíóúüÑÁÉÍÓÚÜ",
        wiki_start_pages=["Wikipedia:Portada"],
    ),
    "Estonian": Language(
        name="Estonian",
        iso_code="et",
        use_ascii=False,
        charsets=['ISO-8859-4', 'ISO-8859-13',
                  'WINDOWS-1257'],
        charsets=["ISO-8859-4", "ISO-8859-13", "WINDOWS-1257"],
        # C, F, Š, Q, W, X, Y, Z, Ž are only for
        # loanwords
        alphabet=(u'ABDEGHIJKLMNOPRSTUVÕÄÖÜ'
                  u'abdeghijklmnoprstuvõäöü'),
        wiki_start_pages=[u'Esileht']),
    'Finnish': Language(name='Finnish',
                        iso_code='fi',
        alphabet="ABDEGHIJKLMNOPRSTUVÕÄÖÜabdeghijklmnoprstuvõäöü",
        wiki_start_pages=["Esileht"],
    ),
    "Finnish": Language(
        name="Finnish",
        iso_code="fi",
        use_ascii=True,
        charsets=['ISO-8859-1', 'ISO-8859-15',
                  'WINDOWS-1252'],
        alphabet=u'ÅÄÖŠŽåäöšž',
        wiki_start_pages=[u'Wikipedia:Etusivu']),
    'French': Language(name='French',
                       iso_code='fr',
        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
        alphabet="ÅÄÖŠŽåäöšž",
        wiki_start_pages=["Wikipedia:Etusivu"],
    ),
    "French": Language(
        name="French",
        iso_code="fr",
        use_ascii=True,
        charsets=['ISO-8859-1', 'ISO-8859-15',
                  'WINDOWS-1252'],
        alphabet=u'œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ',
        wiki_start_pages=[u'Wikipédia:Accueil_principal',
                          u'Bœuf (animal)']),
    'Hebrew': Language(name='Hebrew',
                       iso_code='he',
        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
        alphabet="œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ",
        wiki_start_pages=["Wikipédia:Accueil_principal", "Bœuf (animal)"],
    ),
    "Hebrew": Language(
        name="Hebrew",
        iso_code="he",
        use_ascii=False,
        charsets=['ISO-8859-8', 'WINDOWS-1255'],
        alphabet=u'אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ',
        wiki_start_pages=[u'עמוד_ראשי']),
    'Croatian': Language(name='Croatian',
                         iso_code='hr',
        charsets=["ISO-8859-8", "WINDOWS-1255"],
        alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
        wiki_start_pages=["עמוד_ראשי"],
    ),
    "Croatian": Language(
        name="Croatian",
        iso_code="hr",
        # Q, W, X, Y are only used for foreign words.
        use_ascii=False,
        charsets=['ISO-8859-2', 'WINDOWS-1250'],
        alphabet=(u'abcčćdđefghijklmnoprsštuvzž'
                  u'ABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ'),
        wiki_start_pages=[u'Glavna_stranica']),
    'Hungarian': Language(name='Hungarian',
                          iso_code='hu',
        charsets=["ISO-8859-2", "WINDOWS-1250"],
        alphabet="abcčćdđefghijklmnoprsštuvzžABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ",
        wiki_start_pages=["Glavna_stranica"],
    ),
    "Hungarian": Language(
        name="Hungarian",
        iso_code="hu",
        # Q, W, X, Y are only used for foreign words.
        use_ascii=False,
        charsets=['ISO-8859-2', 'WINDOWS-1250'],
        alphabet=(u'abcdefghijklmnoprstuvzáéíóöőúüű'
                  u'ABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ'),
        wiki_start_pages=[u'Kezdőlap']),
    'Italian': Language(name='Italian',
                        iso_code='it',
        charsets=["ISO-8859-2", "WINDOWS-1250"],
        alphabet="abcdefghijklmnoprstuvzáéíóöőúüűABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ",
        wiki_start_pages=["Kezdőlap"],
    ),
    "Italian": Language(
        name="Italian",
        iso_code="it",
        use_ascii=True,
        charsets=['ISO-8859-1', 'ISO-8859-15',
                  'WINDOWS-1252'],
        alphabet=u'ÀÈÉÌÒÓÙàèéìòóù',
        wiki_start_pages=[u'Pagina_principale']),
    'Lithuanian': Language(name='Lithuanian',
                           iso_code='lt',
        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
        alphabet="ÀÈÉÌÒÓÙàèéìòóù",
        wiki_start_pages=["Pagina_principale"],
    ),
    "Lithuanian": Language(
        name="Lithuanian",
        iso_code="lt",
        use_ascii=False,
        charsets=['ISO-8859-13', 'WINDOWS-1257',
                  'ISO-8859-4'],
        charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
        # Q, W, and X not used at all
        alphabet=(u'AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽ'
                  u'aąbcčdeęėfghiįyjklmnoprsštuųūvzž'),
        wiki_start_pages=[u'Pagrindinis_puslapis']),
    'Latvian': Language(name='Latvian',
                        iso_code='lv',
        alphabet="AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽaąbcčdeęėfghiįyjklmnoprsštuųūvzž",
        wiki_start_pages=["Pagrindinis_puslapis"],
    ),
    "Latvian": Language(
        name="Latvian",
        iso_code="lv",
        use_ascii=False,
        charsets=['ISO-8859-13', 'WINDOWS-1257',
                  'ISO-8859-4'],
        charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
        # Q, W, X, Y are only for loanwords
        alphabet=(u'AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽ'
                  u'aābcčdeēfgģhiījkķlļmnņoprsštuūvzž'),
        wiki_start_pages=[u'Sākumlapa']),
    'Macedonian': Language(name='Macedonian',
                           iso_code='mk',
        alphabet="AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽaābcčdeēfgģhiījkķlļmnņoprsštuūvzž",
        wiki_start_pages=["Sākumlapa"],
    ),
    "Macedonian": Language(
        name="Macedonian",
        iso_code="mk",
        use_ascii=False,
        charsets=['ISO-8859-5', 'WINDOWS-1251',
                  'MacCyrillic', 'IBM855'],
        alphabet=(u'АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШ'
                  u'абвгдѓежзѕијклљмнњопрстќуфхцчџш'),
        wiki_start_pages=[u'Главна_страница']),
    'Dutch': Language(name='Dutch',
                      iso_code='nl',
        charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
        alphabet="АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШабвгдѓежзѕијклљмнњопрстќуфхцчџш",
        wiki_start_pages=["Главна_страница"],
    ),
    "Dutch": Language(
        name="Dutch",
        iso_code="nl",
        use_ascii=True,
        charsets=['ISO-8859-1', 'WINDOWS-1252'],
        wiki_start_pages=[u'Hoofdpagina']),
    'Polish': Language(name='Polish',
                       iso_code='pl',
        charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"],
        wiki_start_pages=["Hoofdpagina"],
    ),
    "Polish": Language(
        name="Polish",
        iso_code="pl",
        # Q and X are only used for foreign words.
        use_ascii=False,
        charsets=['ISO-8859-2', 'WINDOWS-1250'],
        alphabet=(u'AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ'
                  u'aąbcćdeęfghijklłmnńoóprsśtuwyzźż'),
        wiki_start_pages=[u'Wikipedia:Strona_główna']),
    'Portuguese': Language(name='Portuguese',
                           iso_code='pt',
        charsets=["ISO-8859-2", "WINDOWS-1250"],
        alphabet="AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻaąbcćdeęfghijklłmnńoóprsśtuwyzźż",
        wiki_start_pages=["Wikipedia:Strona_główna"],
    ),
    "Portuguese": Language(
        name="Portuguese",
        iso_code="pt",
        use_ascii=True,
        charsets=['ISO-8859-1', 'ISO-8859-15',
                  'WINDOWS-1252'],
        alphabet=u'ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú',
        wiki_start_pages=[u'Wikipédia:Página_principal']),
    'Romanian': Language(name='Romanian',
                         iso_code='ro',
        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
        alphabet="ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú",
        wiki_start_pages=["Wikipédia:Página_principal"],
    ),
    "Romanian": Language(
        name="Romanian",
        iso_code="ro",
        use_ascii=True,
        charsets=['ISO-8859-2', 'WINDOWS-1250'],
        alphabet=u'ăâîșțĂÂÎȘȚ',
        wiki_start_pages=[u'Pagina_principală']),
    'Russian': Language(name='Russian',
                        iso_code='ru',
        charsets=["ISO-8859-2", "WINDOWS-1250"],
        alphabet="ăâîșțĂÂÎȘȚ",
        wiki_start_pages=["Pagina_principală"],
    ),
    "Russian": Language(
        name="Russian",
        iso_code="ru",
        use_ascii=False,
        charsets=['ISO-8859-5', 'WINDOWS-1251',
                  'KOI8-R', 'MacCyrillic', 'IBM866',
                  'IBM855'],
        alphabet=(u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
                  u'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'),
        wiki_start_pages=[u'Заглавная_страница']),
    'Slovak': Language(name='Slovak',
                       iso_code='sk',
        charsets=[
            "ISO-8859-5",
            "WINDOWS-1251",
            "KOI8-R",
            "MacCyrillic",
            "IBM866",
            "IBM855",
        ],
        alphabet="абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
        wiki_start_pages=["Заглавная_страница"],
    ),
    "Slovak": Language(
        name="Slovak",
        iso_code="sk",
        use_ascii=True,
        charsets=['ISO-8859-2', 'WINDOWS-1250'],
        alphabet=u'áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ',
        wiki_start_pages=[u'Hlavná_stránka']),
    'Slovene': Language(name='Slovene',
                        iso_code='sl',
        charsets=["ISO-8859-2", "WINDOWS-1250"],
        alphabet="áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ",
        wiki_start_pages=["Hlavná_stránka"],
    ),
    "Slovene": Language(
        name="Slovene",
        iso_code="sl",
        # Q, W, X, Y are only used for foreign words.
        use_ascii=False,
        charsets=['ISO-8859-2', 'WINDOWS-1250'],
        alphabet=(u'abcčdefghijklmnoprsštuvzž'
                  u'ABCČDEFGHIJKLMNOPRSŠTUVZŽ'),
        wiki_start_pages=[u'Glavna_stran']),
        charsets=["ISO-8859-2", "WINDOWS-1250"],
        alphabet="abcčdefghijklmnoprsštuvzžABCČDEFGHIJKLMNOPRSŠTUVZŽ",
        wiki_start_pages=["Glavna_stran"],
    ),
    # Serbian can be written in both Latin and Cyrillic, but there's no
    # simple way to get the Latin alphabet pages from Wikipedia through
    # the API, so for now we just support Cyrillic.
    'Serbian': Language(name='Serbian',
                        iso_code='sr',
                        alphabet=(u'АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШ'
                                  u'абвгдђежзијклљмнњопрстћуфхцчџш'),
                        charsets=['ISO-8859-5', 'WINDOWS-1251',
                                  'MacCyrillic', 'IBM855'],
                        wiki_start_pages=[u'Главна_страна']),
    'Thai': Language(name='Thai',
                     iso_code='th',
    "Serbian": Language(
        name="Serbian",
        iso_code="sr",
        alphabet="АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШабвгдђежзијклљмнњопрстћуфхцчџш",
        charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
        wiki_start_pages=["Главна_страна"],
    ),
    "Thai": Language(
        name="Thai",
        iso_code="th",
        use_ascii=False,
        charsets=['ISO-8859-11', 'TIS-620', 'CP874'],
        alphabet=u'กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛',
        wiki_start_pages=[u'หน้าหลัก']),
    'Turkish': Language(name='Turkish',
                        iso_code='tr',
        charsets=["ISO-8859-11", "TIS-620", "CP874"],
        alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
        wiki_start_pages=["หน้าหลัก"],
    ),
    "Turkish": Language(
        name="Turkish",
        iso_code="tr",
        # Q, W, and X are not used by Turkish
        use_ascii=False,
        charsets=['ISO-8859-3', 'ISO-8859-9',
                  'WINDOWS-1254'],
        alphabet=(u'abcçdefgğhıijklmnoöprsştuüvyzâîû'
                  u'ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ'),
        wiki_start_pages=[u'Ana_Sayfa']),
    'Vietnamese': Language(name='Vietnamese',
                           iso_code='vi',
        charsets=["ISO-8859-3", "ISO-8859-9", "WINDOWS-1254"],
        alphabet="abcçdefgğhıijklmnoöprsştuüvyzâîûABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ",
        wiki_start_pages=["Ana_Sayfa"],
    ),
    "Vietnamese": Language(
        name="Vietnamese",
        iso_code="vi",
        use_ascii=False,
        # Windows-1258 is the only common 8-bit
        # Vietnamese encoding supported by Python.

@@ -303,8 +345,8 @@ LANGUAGES = {'Arabic': Language(name='Arabic',
        # scheme has declined dramatically following
        # the adoption of Unicode on the World Wide
        # Web.
        charsets=['WINDOWS-1258'],
        alphabet=(u'aăâbcdđeêghiklmnoôơpqrstuưvxy'
                  u'AĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY'),
        wiki_start_pages=[u'Chữ_Quốc_ngữ']),
        charsets=["WINDOWS-1258"],
        alphabet="aăâbcdđeêghiklmnoôơpqrstuưvxyAĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY",
        wiki_start_pages=["Chữ_Quốc_ngữ"],
    ),
}
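The LANGUAGES table is only consumed by the model-training scripts, but it can be inspected directly. A hedged illustration (module path as in this tree; the printed values follow the definitions above):

    from chardet.metadata.languages import LANGUAGES

    greek = LANGUAGES["Greek"]
    print(greek.iso_code)   # "el"
    print(greek.charsets)   # ["ISO-8859-7", "WINDOWS-1253"]
    print(greek.alphabet)   # sorted and de-duplicated by Language.__init__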
lib/chardet/resultdict.py (new file, 16 lines)

@@ -0,0 +1,16 @@
from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # TypedDict was introduced in Python 3.8.
    #
    # TODO: Remove the else block and TYPE_CHECKING check when dropping support
    # for Python 3.7.
    from typing import TypedDict

    class ResultDict(TypedDict):
        encoding: Optional[str]
        confidence: float
        language: Optional[str]

else:
    ResultDict = dict
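At runtime (outside type checking) ResultDict is plain dict, so the annotation costs nothing; detect() and UniversalDetector.close() both return a mapping of this shape. A small sketch:

    import chardet

    result = chardet.detect(b"\xc3\xa9chantillon")  # "échantillon" as UTF-8
    # e.g. {'encoding': 'utf-8', 'confidence': ..., 'language': ''}
    print(result["encoding"], result["confidence"], result["language"])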
@@ -26,20 +26,20 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from collections import namedtuple
from typing import Dict, List, NamedTuple, Optional, Union

from .charsetprober import CharSetProber
from .enums import CharacterCategory, ProbingState, SequenceLikelihood


SingleByteCharSetModel = namedtuple('SingleByteCharSetModel',
                                    ['charset_name',
                                     'language',
                                     'char_to_order_map',
                                     'language_model',
                                     'typical_positive_ratio',
                                     'keep_ascii_letters',
                                     'alphabet'])
class SingleByteCharSetModel(NamedTuple):
    charset_name: str
    language: str
    char_to_order_map: Dict[int, int]
    language_model: Dict[int, Dict[int, int]]
    typical_positive_ratio: float
    keep_ascii_letters: bool
    alphabet: str


class SingleByteCharSetProber(CharSetProber):

@@ -48,48 +48,55 @@ class SingleByteCharSetProber(CharSetProber):
    POSITIVE_SHORTCUT_THRESHOLD = 0.95
    NEGATIVE_SHORTCUT_THRESHOLD = 0.05

    def __init__(self, model, reversed=False, name_prober=None):
        super(SingleByteCharSetProber, self).__init__()
    def __init__(
        self,
        model: SingleByteCharSetModel,
        is_reversed: bool = False,
        name_prober: Optional[CharSetProber] = None,
    ) -> None:
        super().__init__()
        self._model = model
        # TRUE if we need to reverse every pair in the model lookup
        self._reversed = reversed
        self._reversed = is_reversed
        # Optional auxiliary prober for name decision
        self._name_prober = name_prober
        self._last_order = None
        self._seq_counters = None
        self._total_seqs = None
        self._total_char = None
        self._freq_char = None
        self._last_order = 255
        self._seq_counters: List[int] = []
        self._total_seqs = 0
        self._total_char = 0
        self._control_char = 0
        self._freq_char = 0
        self.reset()

    def reset(self):
        super(SingleByteCharSetProber, self).reset()
    def reset(self) -> None:
        super().reset()
        # char order of last character
        self._last_order = 255
        self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
        self._total_seqs = 0
        self._total_char = 0
        self._control_char = 0
        # characters that fall in our sampling range
        self._freq_char = 0

    @property
    def charset_name(self):
    def charset_name(self) -> Optional[str]:
        if self._name_prober:
            return self._name_prober.charset_name
        else:
            return self._model.charset_name

    @property
    def language(self):
    def language(self) -> Optional[str]:
        if self._name_prober:
            return self._name_prober.language
        else:
            return self._model.language

    def feed(self, byte_str):
    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
        # TODO: Make filter_international_words keep things in self.alphabet
        if not self._model.keep_ascii_letters:
            byte_str = self.filter_international_words(byte_str)
        else:
            byte_str = self.remove_xml_tags(byte_str)
        if not byte_str:
            return self.state
        char_to_order_map = self._model.char_to_order_map

@@ -103,9 +110,6 @@ class SingleByteCharSetProber(CharSetProber):
            # _total_char purposes.
            if order < CharacterCategory.CONTROL:
                self._total_char += 1
            # TODO: Follow uchardet's lead and discount confidence for frequent
            #       control characters.
            #       See https://github.com/BYVoid/uchardet/commit/55b4f23971db61
            if order < self.SAMPLE_SIZE:
                self._freq_char += 1
                if self._last_order < self.SAMPLE_SIZE:

@@ -122,23 +126,36 @@ class SingleByteCharSetProber(CharSetProber):
            if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
                confidence = self.get_confidence()
                if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
                    self.logger.debug('%s confidence = %s, we have a winner',
                                      charset_name, confidence)
                    self.logger.debug(
                        "%s confidence = %s, we have a winner", charset_name, confidence
                    )
                    self._state = ProbingState.FOUND_IT
                elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
                    self.logger.debug('%s confidence = %s, below negative '
                                      'shortcut threshhold %s', charset_name,
                    self.logger.debug(
                        "%s confidence = %s, below negative shortcut threshold %s",
                        charset_name,
                        confidence,
                                      self.NEGATIVE_SHORTCUT_THRESHOLD)
                        self.NEGATIVE_SHORTCUT_THRESHOLD,
                    )
                    self._state = ProbingState.NOT_ME

        return self.state

    def get_confidence(self):
    def get_confidence(self) -> float:
        r = 0.01
        if self._total_seqs > 0:
            r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) /
                 self._total_seqs / self._model.typical_positive_ratio)
            r = (
                (
                    self._seq_counters[SequenceLikelihood.POSITIVE]
                    + 0.25 * self._seq_counters[SequenceLikelihood.LIKELY]
                )
                / self._total_seqs
                / self._model.typical_positive_ratio
            )
            # The more control characters (proportionally to the size
            # of the text), the less confident we become in the current
            # charset.
            r = r * (self._total_char - self._control_char) / self._total_char
            r = r * self._freq_char / self._total_char
            if r >= 1.0:
                r = 0.99
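The rewritten get_confidence() above combines a sequence-likelihood ratio with two scaling penalties. A worked example with purely illustrative numbers (not taken from any real model):

    positive, likely, total_seqs = 80, 20, 120
    typical_positive_ratio = 0.95           # hypothetical model constant
    r = (positive + 0.25 * likely) / total_seqs / typical_positive_ratio
    r *= (200 - 1) / 200                    # control-character penalty
    r *= 150 / 200                          # share of chars in the sampling range
    print(round(r, 3))                      # ~0.556; values >= 1.0 are capped at 0.99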
@@ -28,33 +28,38 @@

from .charsetgroupprober import CharSetGroupProber
from .hebrewprober import HebrewProber
from .langbulgarianmodel import (ISO_8859_5_BULGARIAN_MODEL,
                                 WINDOWS_1251_BULGARIAN_MODEL)
from .langbulgarianmodel import ISO_8859_5_BULGARIAN_MODEL, WINDOWS_1251_BULGARIAN_MODEL
from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL
from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL

# from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL,
#                                  WINDOWS_1250_HUNGARIAN_MODEL)
from .langrussianmodel import (IBM855_RUSSIAN_MODEL, IBM866_RUSSIAN_MODEL,
                               ISO_8859_5_RUSSIAN_MODEL, KOI8_R_RUSSIAN_MODEL,
from .langrussianmodel import (
    IBM855_RUSSIAN_MODEL,
    IBM866_RUSSIAN_MODEL,
    ISO_8859_5_RUSSIAN_MODEL,
    KOI8_R_RUSSIAN_MODEL,
    MACCYRILLIC_RUSSIAN_MODEL,
                               WINDOWS_1251_RUSSIAN_MODEL)
    WINDOWS_1251_RUSSIAN_MODEL,
)
from .langthaimodel import TIS_620_THAI_MODEL
from .langturkishmodel import ISO_8859_9_TURKISH_MODEL
from .sbcharsetprober import SingleByteCharSetProber


class SBCSGroupProber(CharSetGroupProber):
    def __init__(self):
        super(SBCSGroupProber, self).__init__()
    def __init__(self) -> None:
        super().__init__()
        hebrew_prober = HebrewProber()
        logical_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
                                                        False, hebrew_prober)
        logical_hebrew_prober = SingleByteCharSetProber(
            WINDOWS_1255_HEBREW_MODEL, is_reversed=False, name_prober=hebrew_prober
        )
        # TODO: See if using ISO-8859-8 Hebrew model works better here, since
        #       it's actually the visual one
        visual_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
                                                       True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew_prober,
                                        visual_hebrew_prober)
        visual_hebrew_prober = SingleByteCharSetProber(
            WINDOWS_1255_HEBREW_MODEL, is_reversed=True, name_prober=hebrew_prober
        )
        hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
        # TODO: ORDER MATTERS HERE. I changed the order vs what was in master
        #       and several tests failed that did not before. Some thought
        #       should be put into the ordering, and we should consider making
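Normally UniversalDetector drives this group prober, but it can also be fed directly. A hedged sketch (the sample text is arbitrary Russian encoded as Windows-1251; such a short input rarely triggers the shortcut thresholds, so confidence stays modest):

    from chardet.enums import ProbingState
    from chardet.sbcsgroupprober import SBCSGroupProber

    prober = SBCSGroupProber()
    state = prober.feed("пароль пароль пароль".encode("windows-1251"))
    if state != ProbingState.NOT_ME:
        print(prober.charset_name, prober.get_confidence())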
@@ -25,68 +25,81 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from typing import Union

from .chardistribution import SJISDistributionAnalysis
from .codingstatemachine import CodingStateMachine
from .enums import MachineState, ProbingState
from .jpcntx import SJISContextAnalysis
from .mbcharsetprober import MultiByteCharSetProber
from .mbcssm import SJIS_SM_MODEL
from .enums import ProbingState, MachineState


class SJISProber(MultiByteCharSetProber):
    def __init__(self):
        super(SJISProber, self).__init__()
    def __init__(self) -> None:
        super().__init__()
        self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
        self.distribution_analyzer = SJISDistributionAnalysis()
        self.context_analyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        super(SJISProber, self).reset()
    def reset(self) -> None:
        super().reset()
        self.context_analyzer.reset()

    @property
    def charset_name(self):
    def charset_name(self) -> str:
        return self.context_analyzer.charset_name

    @property
    def language(self):
    def language(self) -> str:
        return "Japanese"

    def feed(self, byte_str):
        for i in range(len(byte_str)):
            coding_state = self.coding_sm.next_state(byte_str[i])
    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
        assert self.coding_sm is not None
        assert self.distribution_analyzer is not None

        for i, byte in enumerate(byte_str):
            coding_state = self.coding_sm.next_state(byte)
            if coding_state == MachineState.ERROR:
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self.logger.debug(
                    "%s %s prober hit error at byte %s",
                    self.charset_name,
                    self.language,
                    i,
                )
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
            if coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
            if coding_state == MachineState.START:
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    self._last_char[1] = byte_str[0]
                    self.context_analyzer.feed(self._last_char[2 - char_len:],
                                               char_len)
                    self._last_char[1] = byte
                    self.context_analyzer.feed(
                        self._last_char[2 - char_len :], char_len
                    )
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
                                               - char_len], char_len)
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)
                    self.context_analyzer.feed(
                        byte_str[i + 1 - char_len : i + 3 - char_len], char_len
                    )
                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)

        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            if (self.context_analyzer.got_enough_data() and
                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
            if self.context_analyzer.got_enough_data() and (
                self.get_confidence() > self.SHORTCUT_THRESHOLD
            ):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
    def get_confidence(self) -> float:
        assert self.distribution_analyzer is not None

        context_conf = self.context_analyzer.get_confidence()
        distrib_conf = self.distribution_analyzer.get_confidence()
        return max(context_conf, distrib_conf)
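For orientation, this prober's confidence is simply the better of its two analysers (context vs. byte distribution), and it can be exercised on its own. A sketch, with the caveat that on such a short sample the reported confidence stays low (the text is "こんにちは" encoded as Shift_JIS):

    from chardet.sjisprober import SJISProber

    prober = SJISProber()
    prober.feed("こんにちは".encode("shift_jis"))
    print(prober.charset_name)      # "SHIFT_JIS", as reported by the context analyser
    print(prober.get_confidence())  # max(context_conf, distrib_conf)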
@@ -39,16 +39,21 @@ class a user of ``chardet`` should use.
import codecs
import logging
import re
from typing import List, Optional, Union

from .charsetgroupprober import CharSetGroupProber
from .charsetprober import CharSetProber
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .macromanprober import MacRomanProber
from .mbcsgroupprober import MBCSGroupProber
from .resultdict import ResultDict
from .sbcsgroupprober import SBCSGroupProber
from .utf1632prober import UTF1632Prober


class UniversalDetector(object):
class UniversalDetector:
    """
    The ``UniversalDetector`` class underlies the ``chardet.detect`` function
    and coordinates all of the different charset probers.

@@ -66,49 +71,87 @@ class UniversalDetector(object):
    """

    MINIMUM_THRESHOLD = 0.20
    HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
    ESC_DETECTOR = re.compile(b'(\033|~{)')
    WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
    ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
                   'iso-8859-2': 'Windows-1250',
                   'iso-8859-5': 'Windows-1251',
                   'iso-8859-6': 'Windows-1256',
                   'iso-8859-7': 'Windows-1253',
                   'iso-8859-8': 'Windows-1255',
                   'iso-8859-9': 'Windows-1254',
                   'iso-8859-13': 'Windows-1257'}
    HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]")
    ESC_DETECTOR = re.compile(b"(\033|~{)")
    WIN_BYTE_DETECTOR = re.compile(b"[\x80-\x9F]")
    ISO_WIN_MAP = {
        "iso-8859-1": "Windows-1252",
        "iso-8859-2": "Windows-1250",
        "iso-8859-5": "Windows-1251",
        "iso-8859-6": "Windows-1256",
        "iso-8859-7": "Windows-1253",
        "iso-8859-8": "Windows-1255",
        "iso-8859-9": "Windows-1254",
        "iso-8859-13": "Windows-1257",
    }
    # Based on https://encoding.spec.whatwg.org/#names-and-labels
    # but altered to match Python names for encodings and remove mappings
    # that break tests.
    LEGACY_MAP = {
        "ascii": "Windows-1252",
        "iso-8859-1": "Windows-1252",
        "tis-620": "ISO-8859-11",
        "iso-8859-9": "Windows-1254",
        "gb2312": "GB18030",
        "euc-kr": "CP949",
        "utf-16le": "UTF-16",
    }

    def __init__(self, lang_filter=LanguageFilter.ALL):
        self._esc_charset_prober = None
        self._charset_probers = []
        self.result = None
        self.done = None
        self._got_data = None
        self._input_state = None
        self._last_char = None
    def __init__(
        self,
        lang_filter: LanguageFilter = LanguageFilter.ALL,
        should_rename_legacy: bool = False,
    ) -> None:
        self._esc_charset_prober: Optional[EscCharSetProber] = None
        self._utf1632_prober: Optional[UTF1632Prober] = None
        self._charset_probers: List[CharSetProber] = []
        self.result: ResultDict = {
            "encoding": None,
            "confidence": 0.0,
            "language": None,
        }
        self.done = False
        self._got_data = False
        self._input_state = InputState.PURE_ASCII
        self._last_char = b""
        self.lang_filter = lang_filter
        self.logger = logging.getLogger(__name__)
        self._has_win_bytes = None
        self._has_win_bytes = False
        self.should_rename_legacy = should_rename_legacy
        self.reset()

    def reset(self):
    @property
    def input_state(self) -> int:
        return self._input_state

    @property
    def has_win_bytes(self) -> bool:
        return self._has_win_bytes

    @property
    def charset_probers(self) -> List[CharSetProber]:
        return self._charset_probers

    def reset(self) -> None:
        """
        Reset the UniversalDetector and all of its probers back to their
        initial states.  This is called by ``__init__``, so you only need to
        call this directly in between analyses of different documents.
        """
        self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
        self.result = {"encoding": None, "confidence": 0.0, "language": None}
        self.done = False
        self._got_data = False
        self._has_win_bytes = False
        self._input_state = InputState.PURE_ASCII
        self._last_char = b''
        self._last_char = b""
        if self._esc_charset_prober:
            self._esc_charset_prober.reset()
        if self._utf1632_prober:
            self._utf1632_prober.reset()
        for prober in self._charset_probers:
            prober.reset()

    def feed(self, byte_str):
    def feed(self, byte_str: Union[bytes, bytearray]) -> None:
        """
        Takes a chunk of a document and feeds it through all of the relevant
        charset probers.

@@ -125,7 +168,7 @@ class UniversalDetector(object):
        if self.done:
            return

        if not len(byte_str):
        if not byte_str:
            return

        if not isinstance(byte_str, bytearray):

@@ -136,35 +179,38 @@ class UniversalDetector(object):
            # If the data starts with BOM, we know it is UTF
            if byte_str.startswith(codecs.BOM_UTF8):
                # EF BB BF  UTF-8 with BOM
                self.result = {'encoding': "UTF-8-SIG",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith((codecs.BOM_UTF32_LE,
                                      codecs.BOM_UTF32_BE)):
                self.result = {
                    "encoding": "UTF-8-SIG",
                    "confidence": 1.0,
                    "language": "",
                }
            elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
                # FF FE 00 00  UTF-32, little-endian BOM
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {'encoding': "UTF-32",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
                self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""}
            elif byte_str.startswith(b"\xFE\xFF\x00\x00"):
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
                self.result = {
                    # TODO: This encoding is not supported by Python. Should remove?
                    "encoding": "X-ISO-10646-UCS-4-3412",
                    "confidence": 1.0,
                    "language": "",
                }
            elif byte_str.startswith(b"\x00\x00\xFF\xFE"):
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
                               'confidence': 1.0,
                               'language': ''}
                self.result = {
                    # TODO: This encoding is not supported by Python. Should remove?
                    "encoding": "X-ISO-10646-UCS-4-2143",
                    "confidence": 1.0,
                    "language": "",
                }
            elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
                # FF FE  UTF-16, little endian BOM
                # FE FF  UTF-16, big endian BOM
                self.result = {'encoding': "UTF-16",
                               'confidence': 1.0,
                               'language': ''}
                self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""}

            self._got_data = True
            if self.result['encoding'] is not None:
            if self.result["encoding"] is not None:
                self.done = True
                return

@@ -173,12 +219,29 @@ class UniversalDetector(object):
        if self._input_state == InputState.PURE_ASCII:
            if self.HIGH_BYTE_DETECTOR.search(byte_str):
                self._input_state = InputState.HIGH_BYTE
            elif self._input_state == InputState.PURE_ASCII and \
                    self.ESC_DETECTOR.search(self._last_char + byte_str):
            elif (
                self._input_state == InputState.PURE_ASCII
                and self.ESC_DETECTOR.search(self._last_char + byte_str)
            ):
                self._input_state = InputState.ESC_ASCII

        self._last_char = byte_str[-1:]

        # next we will look to see if it appears to be either a UTF-16 or
        # UTF-32 encoding
        if not self._utf1632_prober:
            self._utf1632_prober = UTF1632Prober()

        if self._utf1632_prober.state == ProbingState.DETECTING:
            if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT:
                self.result = {
                    "encoding": self._utf1632_prober.charset_name,
                    "confidence": self._utf1632_prober.get_confidence(),
                    "language": "",
                }
                self.done = True
                return

        # If we've seen escape sequences, use the EscCharSetProber, which
        # uses a simple state machine to check for known escape sequences in
        # HZ and ISO-2022 encodings, since those are the only encodings that

@@ -187,12 +250,11 @@ class UniversalDetector(object):
            if not self._esc_charset_prober:
                self._esc_charset_prober = EscCharSetProber(self.lang_filter)
            if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
                self.result = {'encoding':
                               self._esc_charset_prober.charset_name,
                               'confidence':
                               self._esc_charset_prober.get_confidence(),
                               'language':
                               self._esc_charset_prober.language}
                self.result = {
                    "encoding": self._esc_charset_prober.charset_name,
                    "confidence": self._esc_charset_prober.get_confidence(),
                    "language": self._esc_charset_prober.language,
                }
                self.done = True
        # If we've seen high bytes (i.e., those with values greater than 127),
        # we need to do more complicated checks using all our multi-byte and

@@ -207,17 +269,20 @@ class UniversalDetector(object):
            if self.lang_filter & LanguageFilter.NON_CJK:
                self._charset_probers.append(SBCSGroupProber())
            self._charset_probers.append(Latin1Prober())
            self._charset_probers.append(MacRomanProber())
        for prober in self._charset_probers:
            if prober.feed(byte_str) == ProbingState.FOUND_IT:
                self.result = {'encoding': prober.charset_name,
                               'confidence': prober.get_confidence(),
                               'language': prober.language}
                self.result = {
                    "encoding": prober.charset_name,
                    "confidence": prober.get_confidence(),
                    "language": prober.language,
                }
                self.done = True
                break
        if self.WIN_BYTE_DETECTOR.search(byte_str):
            self._has_win_bytes = True

    def close(self):
    def close(self) -> ResultDict:
        """
        Stop analyzing the current document and come up with a final
        prediction.

@@ -231,13 +296,11 @@ class UniversalDetector(object):
        self.done = True

        if not self._got_data:
            self.logger.debug('no data received!')
            self.logger.debug("no data received!")

        # Default to ASCII if it is all we've seen so far
        elif self._input_state == InputState.PURE_ASCII:
            self.result = {'encoding': 'ascii',
                           'confidence': 1.0,
                           'language': ''}
            self.result = {"encoding": "ascii", "confidence": 1.0, "language": ""}

        # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
        elif self._input_state == InputState.HIGH_BYTE:

@@ -253,34 +316,47 @@ class UniversalDetector(object):
                    max_prober = prober
            if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
                charset_name = max_prober.charset_name
                lower_charset_name = max_prober.charset_name.lower()
                assert charset_name is not None
                lower_charset_name = charset_name.lower()
                confidence = max_prober.get_confidence()
                # Use Windows encoding name instead of ISO-8859 if we saw any
                # extra Windows-specific bytes
                if lower_charset_name.startswith('iso-8859'):
                if lower_charset_name.startswith("iso-8859"):
                    if self._has_win_bytes:
                        charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
                                                            charset_name)
                self.result = {'encoding': charset_name,
                               'confidence': confidence,
                               'language': max_prober.language}
                        charset_name = self.ISO_WIN_MAP.get(
                            lower_charset_name, charset_name
                        )
                # Rename legacy encodings with superset encodings if asked
                if self.should_rename_legacy:
                    charset_name = self.LEGACY_MAP.get(
                        (charset_name or "").lower(), charset_name
                    )
                self.result = {
                    "encoding": charset_name,
                    "confidence": confidence,
                    "language": max_prober.language,
                }

        # Log all prober confidences if none met MINIMUM_THRESHOLD
        if self.logger.getEffectiveLevel() <= logging.DEBUG:
            if self.result['encoding'] is None:
                self.logger.debug('no probers hit minimum threshold')
            if self.result["encoding"] is None:
                self.logger.debug("no probers hit minimum threshold")
                for group_prober in self._charset_probers:
                    if not group_prober:
                        continue
                    if isinstance(group_prober, CharSetGroupProber):
                        for prober in group_prober.probers:
                            self.logger.debug('%s %s confidence = %s',
                            self.logger.debug(
                                "%s %s confidence = %s",
                                prober.charset_name,
                                prober.language,
                                prober.get_confidence())
                                prober.get_confidence(),
                            )
                    else:
                        self.logger.debug('%s %s confidence = %s',
                                          prober.charset_name,
                                          prober.language,
                                          prober.get_confidence())
                        self.logger.debug(
                            "%s %s confidence = %s",
                            group_prober.charset_name,
                            group_prober.language,
                            group_prober.get_confidence(),
                        )
        return self.result
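The class is designed for incremental use; a minimal sketch of the documented feed/close cycle, including the new should_rename_legacy switch (the input file name is hypothetical):

    from chardet.universaldetector import UniversalDetector

    detector = UniversalDetector(should_rename_legacy=True)
    with open("unknown.txt", "rb") as handle:   # hypothetical input file
        for chunk in iter(lambda: handle.read(4096), b""):
            detector.feed(chunk)
            if detector.done:
                break
    result = detector.close()  # e.g. {'encoding': 'Windows-1252', ...}
    print(result)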
lib/chardet/utf1632prober.py (new file, 225 lines)

@@ -0,0 +1,225 @@
######################## BEGIN LICENSE BLOCK ########################
#
# Contributor(s):
#   Jason Zavaglia
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################
from typing import List, Union

from .charsetprober import CharSetProber
from .enums import ProbingState


class UTF1632Prober(CharSetProber):
    """
    This class simply looks for occurrences of zero bytes, and infers
    whether the file is UTF16 or UTF32 (low-endian or big-endian)
    For instance, files looking like ( \0 \0 \0 [nonzero] )+
    have a good probability to be UTF32BE.  Files looking like ( \0 [nonzero] )+
    may be guessed to be UTF16BE, and inversely for little-endian varieties.
    """

    # how many logical characters to scan before feeling confident of prediction
    MIN_CHARS_FOR_DETECTION = 20
    # a fixed constant ratio of expected zeros or non-zeros in modulo-position.
    EXPECTED_RATIO = 0.94

    def __init__(self) -> None:
        super().__init__()
        self.position = 0
        self.zeros_at_mod = [0] * 4
        self.nonzeros_at_mod = [0] * 4
        self._state = ProbingState.DETECTING
        self.quad = [0, 0, 0, 0]
        self.invalid_utf16be = False
        self.invalid_utf16le = False
        self.invalid_utf32be = False
        self.invalid_utf32le = False
        self.first_half_surrogate_pair_detected_16be = False
        self.first_half_surrogate_pair_detected_16le = False
        self.reset()

    def reset(self) -> None:
        super().reset()
        self.position = 0
        self.zeros_at_mod = [0] * 4
        self.nonzeros_at_mod = [0] * 4
        self._state = ProbingState.DETECTING
        self.invalid_utf16be = False
        self.invalid_utf16le = False
        self.invalid_utf32be = False
        self.invalid_utf32le = False
        self.first_half_surrogate_pair_detected_16be = False
        self.first_half_surrogate_pair_detected_16le = False
        self.quad = [0, 0, 0, 0]

    @property
    def charset_name(self) -> str:
        if self.is_likely_utf32be():
            return "utf-32be"
        if self.is_likely_utf32le():
            return "utf-32le"
        if self.is_likely_utf16be():
            return "utf-16be"
        if self.is_likely_utf16le():
            return "utf-16le"
        # default to something valid
        return "utf-16"

    @property
    def language(self) -> str:
        return ""

    def approx_32bit_chars(self) -> float:
        return max(1.0, self.position / 4.0)

    def approx_16bit_chars(self) -> float:
        return max(1.0, self.position / 2.0)

    def is_likely_utf32be(self) -> bool:
        approx_chars = self.approx_32bit_chars()
        return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
            self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
            and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
            and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
            and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
            and not self.invalid_utf32be
        )

    def is_likely_utf32le(self) -> bool:
        approx_chars = self.approx_32bit_chars()
        return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
            self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
            and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
            and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
            and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
            and not self.invalid_utf32le
        )

    def is_likely_utf16be(self) -> bool:
        approx_chars = self.approx_16bit_chars()
        return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
            (self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
            > self.EXPECTED_RATIO
            and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
            > self.EXPECTED_RATIO
            and not self.invalid_utf16be
        )

    def is_likely_utf16le(self) -> bool:
        approx_chars = self.approx_16bit_chars()
        return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
            (self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
            > self.EXPECTED_RATIO
            and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
            > self.EXPECTED_RATIO
            and not self.invalid_utf16le
        )

    def validate_utf32_characters(self, quad: List[int]) -> None:
        """
        Validate if the quad of bytes is valid UTF-32.

        UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
        excluding 0x0000D800 - 0x0000DFFF

        https://en.wikipedia.org/wiki/UTF-32
        """
        if (
            quad[0] != 0
            or quad[1] > 0x10
            or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
        ):
            self.invalid_utf32be = True
        if (
            quad[3] != 0
            or quad[2] > 0x10
            or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
        ):
            self.invalid_utf32le = True

    def validate_utf16_characters(self, pair: List[int]) -> None:
        """
        Validate if the pair of bytes is valid UTF-16.

        UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xDFFF
        with an exception for surrogate pairs, which must be in the range
        0xD800-0xDBFF followed by 0xDC00-0xDFFF

        https://en.wikipedia.org/wiki/UTF-16
        """
        if not self.first_half_surrogate_pair_detected_16be:
            if 0xD8 <= pair[0] <= 0xDB:
                self.first_half_surrogate_pair_detected_16be = True
            elif 0xDC <= pair[0] <= 0xDF:
                self.invalid_utf16be = True
        else:
            if 0xDC <= pair[0] <= 0xDF:
                self.first_half_surrogate_pair_detected_16be = False
            else:
                self.invalid_utf16be = True

        if not self.first_half_surrogate_pair_detected_16le:
            if 0xD8 <= pair[1] <= 0xDB:
                self.first_half_surrogate_pair_detected_16le = True
            elif 0xDC <= pair[1] <= 0xDF:
                self.invalid_utf16le = True
        else:
            if 0xDC <= pair[1] <= 0xDF:
                self.first_half_surrogate_pair_detected_16le = False
            else:
                self.invalid_utf16le = True

    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
        for c in byte_str:
            mod4 = self.position % 4
            self.quad[mod4] = c
            if mod4 == 3:
                self.validate_utf32_characters(self.quad)
                self.validate_utf16_characters(self.quad[0:2])
                self.validate_utf16_characters(self.quad[2:4])
            if c == 0:
                self.zeros_at_mod[mod4] += 1
            else:
                self.nonzeros_at_mod[mod4] += 1
            self.position += 1
        return self.state

    @property
    def state(self) -> ProbingState:
        if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
            # terminal, decided states
            return self._state
        if self.get_confidence() > 0.80:
            self._state = ProbingState.FOUND_IT
        elif self.position > 4 * 1024:
            # if we get to 4kb into the file, and we can't conclude it's UTF,
            # let's give up
            self._state = ProbingState.NOT_ME
        return self._state

    def get_confidence(self) -> float:
        return (
            0.85
            if (
                self.is_likely_utf16le()
                or self.is_likely_utf16be()
                or self.is_likely_utf32le()
                or self.is_likely_utf32be()
            )
            else 0.00
        )
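A sketch of the zero-byte heuristic in action: big-endian UTF-32 ASCII text is three zero bytes followed by one nonzero byte per character, which is exactly what the mod-4 counters measure (the sample text is arbitrary and long enough to clear MIN_CHARS_FOR_DETECTION):

    from chardet.utf1632prober import UTF1632Prober

    prober = UTF1632Prober()
    data = ("Hello, world! " * 8).encode("utf-32-be")  # 112 logical characters
    prober.feed(data)
    print(prober.charset_name)      # "utf-32be"
    print(prober.get_confidence())  # 0.85 when one variant matches, else 0.0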
@@ -25,45 +25,46 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8_SM_MODEL
from typing import Union

from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .enums import MachineState, ProbingState
from .mbcssm import UTF8_SM_MODEL


class UTF8Prober(CharSetProber):
    ONE_CHAR_PROB = 0.5

    def __init__(self):
        super(UTF8Prober, self).__init__()
    def __init__(self) -> None:
        super().__init__()
        self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
        self._num_mb_chars = None
        self._num_mb_chars = 0
        self.reset()

    def reset(self):
        super(UTF8Prober, self).reset()
    def reset(self) -> None:
        super().reset()
        self.coding_sm.reset()
        self._num_mb_chars = 0

    @property
    def charset_name(self):
    def charset_name(self) -> str:
        return "utf-8"

    @property
    def language(self):
    def language(self) -> str:
        return ""

    def feed(self, byte_str):
    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
        for c in byte_str:
            coding_state = self.coding_sm.next_state(c)
            if coding_state == MachineState.ERROR:
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
            if coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
            if coding_state == MachineState.START:
                if self.coding_sm.get_current_charlen() >= 2:
                    self._num_mb_chars += 1

@@ -73,10 +74,9 @@ class UTF8Prober(CharSetProber):

        return self.state

    def get_confidence(self):
    def get_confidence(self) -> float:
        unlike = 0.99
        if self._num_mb_chars < 6:
            unlike *= self.ONE_CHAR_PROB**self._num_mb_chars
            return 1.0 - unlike
        else:
            return unlike
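The confidence here is simple geometric damping: each multi-byte character seen halves the 0.99 prior that the input is *not* UTF-8, and at six or more such characters the prober returns a flat 0.99. A quick check of the arithmetic:

    num_mb_chars = 3
    unlike = 0.99 * (0.5 ** num_mb_chars)
    print(1.0 - unlike)  # 0.87625; with num_mb_chars >= 6 the method returns 0.99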
@@ -1,9 +1,9 @@
"""
This module exists only to simplify retrieving the version number of chardet
from within setup.py and from chardet subpackages.
from within setuptools and from chardet subpackages.

:author: Dan Blanchard (dan.blanchard@gmail.com)
"""

__version__ = "4.0.0"
VERSION = __version__.split('.')
__version__ = "5.1.0"
VERSION = __version__.split(".")
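Note that the split yields strings, not integers, so comparing VERSION lists lexically would misorder a future "10" against "9"; a quick check of what the bumped value produces:

    assert "5.1.0".split(".") == ["5", "1", "0"]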