Merge branch 'feature/UpdateChardet' into dev

JackDandy 2023-02-09 14:38:35 +00:00
commit eacfd57a85
49 changed files with 9067 additions and 5845 deletions

CHANGES.md

@@ -6,6 +6,7 @@
 * Remove lockfile no longer used by cachecontrol
 * Update Msgpack 1.0.0 (fa7d744) to 1.0.4 (b5acfd5)
 * Update certifi 2022.09.24 to 2022.12.07
+* Update chardet packages 4.0.0 (b3d867a) to 5.1.0 (8087f00)
 * Update diskcache 5.1.0 (40ce0de) to 5.4.0 (1cb1425)
 * Update feedparser 6.0.1 (98d189fa) to 6.0.10 (5fcb3ae)
 * Update humanize 3.5.0 (b6b0ea5) to 4.0.0 (a1514eb)

chardet/__init__.py

@@ -15,68 +15,101 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################

-from .universaldetector import UniversalDetector
-from .enums import InputState
-from .version import __version__, VERSION
+from typing import List, Union
+
+from .charsetgroupprober import CharSetGroupProber
+from .charsetprober import CharSetProber
+from .enums import InputState
+from .resultdict import ResultDict
+from .universaldetector import UniversalDetector
+from .version import VERSION, __version__

-__all__ = ['UniversalDetector', 'detect', 'detect_all', '__version__', 'VERSION']
+__all__ = ["UniversalDetector", "detect", "detect_all", "__version__", "VERSION"]


-def detect(byte_str):
+def detect(
+    byte_str: Union[bytes, bytearray], should_rename_legacy: bool = False
+) -> ResultDict:
     """
     Detect the encoding of the given byte string.

     :param byte_str: The byte sequence to examine.
     :type byte_str: ``bytes`` or ``bytearray``
+    :param should_rename_legacy: Should we rename legacy encodings
+                                 to their more modern equivalents?
+    :type should_rename_legacy: ``bool``
     """
     if not isinstance(byte_str, bytearray):
         if not isinstance(byte_str, bytes):
-            raise TypeError('Expected object of type bytes or bytearray, got: '
-                            '{0}'.format(type(byte_str)))
-        else:
-            byte_str = bytearray(byte_str)
-    detector = UniversalDetector()
+            raise TypeError(
+                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
+            )
+        byte_str = bytearray(byte_str)
+    detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
     detector.feed(byte_str)
     return detector.close()


-def detect_all(byte_str):
+def detect_all(
+    byte_str: Union[bytes, bytearray],
+    ignore_threshold: bool = False,
+    should_rename_legacy: bool = False,
+) -> List[ResultDict]:
     """
     Detect all the possible encodings of the given byte string.

     :param byte_str: The byte sequence to examine.
     :type byte_str: ``bytes`` or ``bytearray``
+    :param ignore_threshold: Include encodings that are below
+                             ``UniversalDetector.MINIMUM_THRESHOLD``
+                             in results.
+    :type ignore_threshold: ``bool``
+    :param should_rename_legacy: Should we rename legacy encodings
+                                 to their more modern equivalents?
+    :type should_rename_legacy: ``bool``
     """
     if not isinstance(byte_str, bytearray):
         if not isinstance(byte_str, bytes):
-            raise TypeError('Expected object of type bytes or bytearray, got: '
-                            '{0}'.format(type(byte_str)))
-        else:
-            byte_str = bytearray(byte_str)
+            raise TypeError(
+                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
+            )
+        byte_str = bytearray(byte_str)

-    detector = UniversalDetector()
+    detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
     detector.feed(byte_str)
     detector.close()

-    if detector._input_state == InputState.HIGH_BYTE:
-        results = []
-        for prober in detector._charset_probers:
-            if prober.get_confidence() > detector.MINIMUM_THRESHOLD:
-                charset_name = prober.charset_name
-                lower_charset_name = prober.charset_name.lower()
+    if detector.input_state == InputState.HIGH_BYTE:
+        results: List[ResultDict] = []
+        probers: List[CharSetProber] = []
+        for prober in detector.charset_probers:
+            if isinstance(prober, CharSetGroupProber):
+                probers.extend(p for p in prober.probers)
+            else:
+                probers.append(prober)
+        for prober in probers:
+            if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
+                charset_name = prober.charset_name or ""
+                lower_charset_name = charset_name.lower()
                 # Use Windows encoding name instead of ISO-8859 if we saw any
                 # extra Windows-specific bytes
-                if lower_charset_name.startswith('iso-8859'):
-                    if detector._has_win_bytes:
-                        charset_name = detector.ISO_WIN_MAP.get(lower_charset_name,
-                                                                charset_name)
-                results.append({
-                    'encoding': charset_name,
-                    'confidence': prober.get_confidence()
-                })
+                if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
+                    charset_name = detector.ISO_WIN_MAP.get(
+                        lower_charset_name, charset_name
+                    )
+                # Rename legacy encodings with superset encodings if asked
+                if should_rename_legacy:
+                    charset_name = detector.LEGACY_MAP.get(
+                        charset_name.lower(), charset_name
+                    )
+                results.append(
+                    {
+                        "encoding": charset_name,
+                        "confidence": prober.get_confidence(),
+                        "language": prober.language,
+                    }
+                )
         if len(results) > 0:
-            return sorted(results, key=lambda result: -result['confidence'])
+            return sorted(results, key=lambda result: -result["confidence"])

     return [detector.result]
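
A minimal usage sketch of the updated public API (the sample bytes are invented; results vary by input):

import chardet

raw = "Привет, мир".encode("cp1251")  # illustrative sample bytes

# detect() still returns one best guess; should_rename_legacy=True reports
# modern superset names for legacy encodings
print(chardet.detect(raw, should_rename_legacy=True))

# detect_all() returns every plausible candidate; ignore_threshold=True also
# keeps probers scoring below UniversalDetector.MINIMUM_THRESHOLD
for guess in chardet.detect_all(raw, ignore_threshold=True):
    print(guess["encoding"], guess["confidence"], guess["language"])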

chardet/big5freq.py

@@ -44,7 +44,7 @@ BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
 # Char to FreqOrder table
 BIG5_TABLE_SIZE = 5376

+# fmt: off
 BIG5_CHAR_TO_FREQ_ORDER = (
    1,1801,1506, 255,1431, 198,   9,  82,   6,5008, 177, 202,3681,1256,2821, 110,  # 16
 3814,  33,3274, 261,  76,  44,2114,  16,2946,2187,1176, 659,3971,  26,3451,2653,  # 32
@@ -383,4 +383,4 @@ BIG5_CHAR_TO_FREQ_ORDER = (
  890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941,  # 5360
 2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798,  # 5376
 )
+# fmt: on

chardet/big5prober.py

@@ -25,23 +25,23 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################

-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
 from .chardistribution import Big5DistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import BIG5_SM_MODEL


 class Big5Prober(MultiByteCharSetProber):
-    def __init__(self):
-        super(Big5Prober, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
         self.distribution_analyzer = Big5DistributionAnalysis()
         self.reset()

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "Big5"

     @property
-    def language(self):
+    def language(self) -> str:
         return "Chinese"

chardet/chardistribution.py

@@ -25,40 +25,58 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################

-from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
-                        EUCTW_TYPICAL_DISTRIBUTION_RATIO)
-from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
-                        EUCKR_TYPICAL_DISTRIBUTION_RATIO)
-from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
-                         GB2312_TYPICAL_DISTRIBUTION_RATIO)
-from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
-                       BIG5_TYPICAL_DISTRIBUTION_RATIO)
-from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
-                      JIS_TYPICAL_DISTRIBUTION_RATIO)
+from typing import Tuple, Union
+
+from .big5freq import (
+    BIG5_CHAR_TO_FREQ_ORDER,
+    BIG5_TABLE_SIZE,
+    BIG5_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .euckrfreq import (
+    EUCKR_CHAR_TO_FREQ_ORDER,
+    EUCKR_TABLE_SIZE,
+    EUCKR_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .euctwfreq import (
+    EUCTW_CHAR_TO_FREQ_ORDER,
+    EUCTW_TABLE_SIZE,
+    EUCTW_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .gb2312freq import (
+    GB2312_CHAR_TO_FREQ_ORDER,
+    GB2312_TABLE_SIZE,
+    GB2312_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .jisfreq import (
+    JIS_CHAR_TO_FREQ_ORDER,
+    JIS_TABLE_SIZE,
+    JIS_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .johabfreq import JOHAB_TO_EUCKR_ORDER_TABLE


-class CharDistributionAnalysis(object):
+class CharDistributionAnalysis:
     ENOUGH_DATA_THRESHOLD = 1024
     SURE_YES = 0.99
     SURE_NO = 0.01
     MINIMUM_DATA_THRESHOLD = 3

-    def __init__(self):
+    def __init__(self) -> None:
         # Mapping table to get frequency order from char order (get from
         # GetOrder())
-        self._char_to_freq_order = None
-        self._table_size = None  # Size of above table
+        self._char_to_freq_order: Tuple[int, ...] = tuple()
+        self._table_size = 0  # Size of above table
         # This is a constant value which varies from language to language,
         # used in calculating confidence.  See
         # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
         # for further detail.
-        self.typical_distribution_ratio = None
-        self._done = None
-        self._total_chars = None
-        self._freq_chars = None
+        self.typical_distribution_ratio = 0.0
+        self._done = False
+        self._total_chars = 0
+        self._freq_chars = 0
         self.reset()

-    def reset(self):
+    def reset(self) -> None:
         """reset analyser, clear any state"""
         # If this flag is set to True, detection is done and conclusion has
         # been made
@@ -67,7 +85,7 @@ class CharDistributionAnalysis(object):
         # The number of characters whose frequency order is less than 512
         self._freq_chars = 0

-    def feed(self, char, char_len):
+    def feed(self, char: Union[bytes, bytearray], char_len: int) -> None:
         """feed a character with known length"""
         if char_len == 2:
             # we only care about 2-bytes character in our distribution analysis
@@ -81,7 +99,7 @@ class CharDistributionAnalysis(object):
             if 512 > self._char_to_freq_order[order]:
                 self._freq_chars += 1

-    def get_confidence(self):
+    def get_confidence(self) -> float:
         """return confidence based on existing data"""
         # if we didn't receive any character in our consideration range,
         # return negative answer
@@ -89,20 +107,21 @@ class CharDistributionAnalysis(object):
             return self.SURE_NO

         if self._total_chars != self._freq_chars:
-            r = (self._freq_chars / ((self._total_chars - self._freq_chars)
-                 * self.typical_distribution_ratio))
+            r = self._freq_chars / (
+                (self._total_chars - self._freq_chars) * self.typical_distribution_ratio
+            )
             if r < self.SURE_YES:
                 return r

         # normalize confidence (we don't want to be 100% sure)
         return self.SURE_YES

-    def got_enough_data(self):
+    def got_enough_data(self) -> bool:
         # It is not necessary to receive all data to draw conclusion.
         # For charset detection, certain amount of data is enough
         return self._total_chars > self.ENOUGH_DATA_THRESHOLD

-    def get_order(self, byte_str):
+    def get_order(self, _: Union[bytes, bytearray]) -> int:
         # We do not handle characters based on the original encoding string,
         # but convert this encoding string to a number, here called order.
         # This allows multiple encodings of a language to share one frequency
@@ -111,13 +130,13 @@ class CharDistributionAnalysis(object):

 class EUCTWDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(EUCTWDistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
         self._table_size = EUCTW_TABLE_SIZE
         self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for euc-TW encoding, we are interested
         #   first  byte range: 0xc4 -- 0xfe
         #   second byte range: 0xa1 -- 0xfe
@@ -125,18 +144,17 @@ class EUCTWDistributionAnalysis(CharDistributionAnalysis):
         first_char = byte_str[0]
         if first_char >= 0xC4:
             return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
-        else:
-            return -1
+        return -1


 class EUCKRDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(EUCKRDistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
         self._table_size = EUCKR_TABLE_SIZE
         self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for euc-KR encoding, we are interested
         #   first  byte range: 0xb0 -- 0xfe
         #   second byte range: 0xa1 -- 0xfe
@@ -144,18 +162,32 @@ class EUCKRDistributionAnalysis(CharDistributionAnalysis):
         first_char = byte_str[0]
         if first_char >= 0xB0:
             return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
-        else:
-            return -1
+        return -1
+
+
+class JOHABDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self) -> None:
+        super().__init__()
+        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
+        self._table_size = EUCKR_TABLE_SIZE
+        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+        first_char = byte_str[0]
+        if 0x88 <= first_char < 0xD4:
+            code = first_char * 256 + byte_str[1]
+            return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1)
+        return -1


 class GB2312DistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(GB2312DistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
         self._table_size = GB2312_TABLE_SIZE
         self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO

-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for GB2312 encoding, we are interested
         #  first  byte range: 0xb0 -- 0xfe
         #  second byte range: 0xa1 -- 0xfe
@@ -163,18 +195,17 @@ class GB2312DistributionAnalysis(CharDistributionAnalysis):
         first_char, second_char = byte_str[0], byte_str[1]
         if (first_char >= 0xB0) and (second_char >= 0xA1):
             return 94 * (first_char - 0xB0) + second_char - 0xA1
-        else:
-            return -1
+        return -1


 class Big5DistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(Big5DistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
         self._table_size = BIG5_TABLE_SIZE
         self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO

-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for big5 encoding, we are interested
         #   first  byte range: 0xa4 -- 0xfe
         #   second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
@@ -183,28 +214,26 @@ class Big5DistributionAnalysis(CharDistributionAnalysis):
         if first_char >= 0xA4:
             if second_char >= 0xA1:
                 return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
-            else:
-                return 157 * (first_char - 0xA4) + second_char - 0x40
-        else:
-            return -1
+            return 157 * (first_char - 0xA4) + second_char - 0x40
+        return -1


 class SJISDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(SJISDistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
         self._table_size = JIS_TABLE_SIZE
         self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for sjis encoding, we are interested
         #   first  byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
         #   second byte range: 0x40 -- 0x7e, 0x81 -- oxfe
         # no validation needed here. State machine has done that
         first_char, second_char = byte_str[0], byte_str[1]
-        if (first_char >= 0x81) and (first_char <= 0x9F):
+        if 0x81 <= first_char <= 0x9F:
             order = 188 * (first_char - 0x81)
-        elif (first_char >= 0xE0) and (first_char <= 0xEF):
+        elif 0xE0 <= first_char <= 0xEF:
             order = 188 * (first_char - 0xE0 + 31)
         else:
             return -1
@@ -215,19 +244,18 @@ class SJISDistributionAnalysis(CharDistributionAnalysis):

 class EUCJPDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super(EUCJPDistributionAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
         self._table_size = JIS_TABLE_SIZE
         self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
         # for euc-JP encoding, we are interested
         #   first  byte range: 0xa0 -- 0xfe
         #   second byte range: 0xa1 -- 0xfe
         # no validation needed here. State machine has done that
         char = byte_str[0]
         if char >= 0xA0:
-            return 94 * (char - 0xA1) + byte_str[1] - 0xa1
-        else:
-            return -1
+            return 94 * (char - 0xA1) + byte_str[1] - 0xA1
+        return -1
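
To make the get_confidence() ratio concrete, a hand-worked example (the character counts are invented; 6.0 is EUCKR_TYPICAL_DISTRIBUTION_RATIO from euckrfreq.py below):

total_chars, freq_chars, ratio = 1000, 600, 6.0
r = freq_chars / ((total_chars - freq_chars) * ratio)  # 600 / (400 * 6.0)
assert r == 0.25  # below SURE_YES, so it is returned as the confidence
# with freq_chars = 900: 900 / (100 * 6.0) = 1.5, capped at SURE_YES (0.99)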

chardet/charsetgroupprober.py

@@ -25,29 +25,30 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################

-from .enums import ProbingState
+from typing import List, Optional, Union
+
 from .charsetprober import CharSetProber
+from .enums import LanguageFilter, ProbingState


 class CharSetGroupProber(CharSetProber):
-    def __init__(self, lang_filter=None):
-        super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
+    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+        super().__init__(lang_filter=lang_filter)
         self._active_num = 0
-        self.probers = []
-        self._best_guess_prober = None
+        self.probers: List[CharSetProber] = []
+        self._best_guess_prober: Optional[CharSetProber] = None

-    def reset(self):
-        super(CharSetGroupProber, self).reset()
+    def reset(self) -> None:
+        super().reset()
         self._active_num = 0
         for prober in self.probers:
-            if prober:
-                prober.reset()
-                prober.active = True
-                self._active_num += 1
+            prober.reset()
+            prober.active = True
+            self._active_num += 1
         self._best_guess_prober = None

     @property
-    def charset_name(self):
+    def charset_name(self) -> Optional[str]:
         if not self._best_guess_prober:
             self.get_confidence()
             if not self._best_guess_prober:
@@ -55,17 +56,15 @@ class CharSetGroupProber(CharSetProber):
         return self._best_guess_prober.charset_name

     @property
-    def language(self):
+    def language(self) -> Optional[str]:
         if not self._best_guess_prober:
             self.get_confidence()
             if not self._best_guess_prober:
                 return None
         return self._best_guess_prober.language

-    def feed(self, byte_str):
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
         for prober in self.probers:
-            if not prober:
-                continue
             if not prober.active:
                 continue
             state = prober.feed(byte_str)
@@ -73,8 +72,9 @@ class CharSetGroupProber(CharSetProber):
                 continue
             if state == ProbingState.FOUND_IT:
                 self._best_guess_prober = prober
+                self._state = ProbingState.FOUND_IT
                 return self.state
-            elif state == ProbingState.NOT_ME:
+            if state == ProbingState.NOT_ME:
                 prober.active = False
                 self._active_num -= 1
                 if self._active_num <= 0:
@@ -82,22 +82,22 @@ class CharSetGroupProber(CharSetProber):
                     return self.state
         return self.state

-    def get_confidence(self):
+    def get_confidence(self) -> float:
         state = self.state
         if state == ProbingState.FOUND_IT:
             return 0.99
-        elif state == ProbingState.NOT_ME:
+        if state == ProbingState.NOT_ME:
             return 0.01
         best_conf = 0.0
         self._best_guess_prober = None
         for prober in self.probers:
-            if not prober:
-                continue
             if not prober.active:
-                self.logger.debug('%s not active', prober.charset_name)
+                self.logger.debug("%s not active", prober.charset_name)
                 continue
             conf = prober.get_confidence()
-            self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
+            self.logger.debug(
+                "%s %s confidence = %s", prober.charset_name, prober.language, conf
+            )
             if best_conf < conf:
                 best_conf = conf
                 self._best_guess_prober = prober

chardet/charsetprober.py

@@ -28,54 +28,62 @@

 import logging
 import re
+from typing import Optional, Union

-from .enums import ProbingState
+from .enums import LanguageFilter, ProbingState

+INTERNATIONAL_WORDS_PATTERN = re.compile(
+    b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?"
+)

-class CharSetProber(object):

+class CharSetProber:
     SHORTCUT_THRESHOLD = 0.95

-    def __init__(self, lang_filter=None):
-        self._state = None
+    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+        self._state = ProbingState.DETECTING
+        self.active = True
         self.lang_filter = lang_filter
         self.logger = logging.getLogger(__name__)

-    def reset(self):
+    def reset(self) -> None:
         self._state = ProbingState.DETECTING

     @property
-    def charset_name(self):
+    def charset_name(self) -> Optional[str]:
         return None

-    def feed(self, buf):
-        pass
+    @property
+    def language(self) -> Optional[str]:
+        raise NotImplementedError
+
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+        raise NotImplementedError

     @property
-    def state(self):
+    def state(self) -> ProbingState:
         return self._state

-    def get_confidence(self):
+    def get_confidence(self) -> float:
         return 0.0

     @staticmethod
-    def filter_high_byte_only(buf):
-        buf = re.sub(b'([\x00-\x7F])+', b' ', buf)
+    def filter_high_byte_only(buf: Union[bytes, bytearray]) -> bytes:
+        buf = re.sub(b"([\x00-\x7F])+", b" ", buf)
         return buf

     @staticmethod
-    def filter_international_words(buf):
+    def filter_international_words(buf: Union[bytes, bytearray]) -> bytearray:
         """
         We define three types of bytes:
         alphabet: english alphabets [a-zA-Z]
         international: international characters [\x80-\xFF]
         marker: everything else [^a-zA-Z\x80-\xFF]

         The input buffer can be thought to contain a series of words delimited
         by markers. This function works to filter all words that contain at
         least one international character. All contiguous sequences of markers
         are replaced by a single space ascii character.

         This filter applies to all scripts which do not use English characters.
         """
         filtered = bytearray()
@@ -83,8 +91,7 @@ class CharSetProber(object):
         # This regex expression filters out only words that have at-least one
         # international character. The word may include one marker character at
         # the end.
-        words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
-                           buf)
+        words = INTERNATIONAL_WORDS_PATTERN.findall(buf)

         for word in words:
             filtered.extend(word[:-1])
@@ -94,20 +101,17 @@ class CharSetProber(object):
             # similarly across all languages and may thus have similar
             # frequencies).
             last_char = word[-1:]
-            if not last_char.isalpha() and last_char < b'\x80':
-                last_char = b' '
+            if not last_char.isalpha() and last_char < b"\x80":
+                last_char = b" "
             filtered.extend(last_char)

         return filtered

     @staticmethod
-    def filter_with_english_letters(buf):
+    def remove_xml_tags(buf: Union[bytes, bytearray]) -> bytes:
         """
         Returns a copy of ``buf`` that retains only the sequences of English
         alphabet and high byte characters that are not between <> characters.
-        Also retains English alphabet and high byte characters immediately
-        before occurrences of >.

         This filter can be applied to all scripts which contain both English
         characters and extended ASCII characters, but is currently only used by
         ``Latin1Prober``.
@@ -115,26 +119,24 @@ class CharSetProber(object):
         filtered = bytearray()
         in_tag = False
         prev = 0
+        buf = memoryview(buf).cast("c")

-        for curr in range(len(buf)):
-            # Slice here to get bytes instead of an int with Python 3
-            buf_char = buf[curr:curr + 1]
-            # Check if we're coming out of or entering an HTML tag
-            if buf_char == b'>':
+        for curr, buf_char in enumerate(buf):
+            # Check if we're coming out of or entering an XML tag
+            # https://github.com/python/typeshed/issues/8182
+            if buf_char == b">":  # type: ignore[comparison-overlap]
+                prev = curr + 1
                 in_tag = False
-            elif buf_char == b'<':
-                in_tag = True
-
-            # If current character is not extended-ASCII and not alphabetic...
-            if buf_char < b'\x80' and not buf_char.isalpha():
-                # ...and we're not in a tag
+            # https://github.com/python/typeshed/issues/8182
+            elif buf_char == b"<":  # type: ignore[comparison-overlap]
                 if curr > prev and not in_tag:
                     # Keep everything after last non-extended-ASCII,
                     # non-alphabetic character
                     filtered.extend(buf[prev:curr])
                     # Output a space to delimit stretch we kept
-                    filtered.extend(b' ')
-                prev = curr + 1
+                    filtered.extend(b" ")
+                in_tag = True

         # If we're not in a tag...
         if not in_tag:
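
A quick sketch of the three static filters; the byte strings are invented and the expected values are hand-traced from the code above, not taken from this diff:

from chardet.charsetprober import CharSetProber

# every run of ASCII bytes collapses to a single space
assert CharSetProber.filter_high_byte_only(b"abc\xe9def") == b" \xe9 "

# only words containing at least one high byte survive
assert CharSetProber.filter_international_words(b"Hello caf\xe9 world") == bytearray(b"caf\xe9 ")

# the renamed remove_xml_tags() (was filter_with_english_letters) drops
# everything between < and >
assert bytes(CharSetProber.remove_xml_tags(b"<p>caf\xe9</p>")) == b"caf\xe9 "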

chardet/cli/__init__.py

@@ -1 +0,0 @@
-

chardet/cli/chardetect.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 """
 Script which takes one or more file paths and reports on their detected
 encodings
@@ -13,17 +12,21 @@ If no paths are provided, it takes its input from stdin.
 """

-from __future__ import absolute_import, print_function, unicode_literals

 import argparse
 import sys
+from typing import Iterable, List, Optional

-from chardet import __version__
-from chardet.compat import PY2
-from chardet.universaldetector import UniversalDetector
+from .. import __version__
+from ..universaldetector import UniversalDetector


-def description_of(lines, name='stdin'):
+def description_of(
+    lines: Iterable[bytes],
+    name: str = "stdin",
+    minimal: bool = False,
+    should_rename_legacy: bool = False,
+) -> Optional[str]:
     """
     Return a string describing the probable encoding of a file or
     list of strings.
@@ -32,8 +35,11 @@ def description_of(lines, name='stdin'):
     :type lines: Iterable of bytes
     :param name: Name of file or collection of lines
     :type name: str
+    :param should_rename_legacy: Should we rename legacy encodings to
+                                 their more modern equivalents?
+    :type should_rename_legacy: ``bool``
     """
-    u = UniversalDetector()
+    u = UniversalDetector(should_rename_legacy=should_rename_legacy)
     for line in lines:
         line = bytearray(line)
         u.feed(line)
@@ -42,16 +48,14 @@ def description_of(lines, name='stdin'):
             break
     u.close()
     result = u.result
-    if PY2:
-        name = name.decode(sys.getfilesystemencoding(), 'ignore')
-    if result['encoding']:
-        return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
-                                                     result['confidence'])
-    else:
-        return '{0}: no result'.format(name)
+    if minimal:
+        return result["encoding"]
+    if result["encoding"]:
+        return f'{name}: {result["encoding"]} with confidence {result["confidence"]}'
+    return f"{name}: no result"


-def main(argv=None):
+def main(argv: Optional[List[str]] = None) -> None:
     """
     Handles command line arguments and gets things started.
@@ -61,25 +65,48 @@ def main(argv=None):
     """
     # Get command line arguments
     parser = argparse.ArgumentParser(
-        description="Takes one or more file paths and reports their detected \
-                     encodings")
-    parser.add_argument('input',
-                        help='File whose encoding we would like to determine. \
-                              (default: stdin)',
-                        type=argparse.FileType('rb'), nargs='*',
-                        default=[sys.stdin if PY2 else sys.stdin.buffer])
-    parser.add_argument('--version', action='version',
-                        version='%(prog)s {0}'.format(__version__))
+        description=(
+            "Takes one or more file paths and reports their detected encodings"
+        )
+    )
+    parser.add_argument(
+        "input",
+        help="File whose encoding we would like to determine. (default: stdin)",
+        type=argparse.FileType("rb"),
+        nargs="*",
+        default=[sys.stdin.buffer],
+    )
+    parser.add_argument(
+        "--minimal",
+        help="Print only the encoding to standard output",
+        action="store_true",
+    )
+    parser.add_argument(
+        "-l",
+        "--legacy",
+        help="Rename legacy encodings to more modern ones.",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--version", action="version", version=f"%(prog)s {__version__}"
+    )
     args = parser.parse_args(argv)

     for f in args.input:
         if f.isatty():
-            print("You are running chardetect interactively. Press " +
-                  "CTRL-D twice at the start of a blank line to signal the " +
-                  "end of your input. If you want help, run chardetect " +
-                  "--help\n", file=sys.stderr)
-        print(description_of(f, f.name))
+            print(
+                "You are running chardetect interactively. Press "
+                "CTRL-D twice at the start of a blank line to signal the "
+                "end of your input. If you want help, run chardetect "
+                "--help\n",
+                file=sys.stderr,
+            )
+        print(
+            description_of(
+                f, f.name, minimal=args.minimal, should_rename_legacy=args.legacy
+            )
+        )


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
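
An illustrative shell session with the new flags (file name, encoding, and confidence are invented; the --legacy rename target assumes LEGACY_MAP maps ISO-8859-1 to Windows-1252, which is not shown in this diff):

$ chardetect legacy.txt
legacy.txt: ISO-8859-1 with confidence 0.73
$ chardetect --minimal legacy.txt
ISO-8859-1
$ chardetect --minimal --legacy legacy.txt
Windows-1252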

chardet/codingstatemachine.py

@@ -27,10 +27,11 @@

 import logging

+from .codingstatemachinedict import CodingStateMachineDict
 from .enums import MachineState


-class CodingStateMachine(object):
+class CodingStateMachine:
     """
     A state machine to verify a byte sequence for a particular encoding. For
     each byte the detector receives, it will feed that byte to every active
@@ -52,37 +53,38 @@ class CodingStateMachine(object):
     negative answer for this encoding. Detector will exclude this
     encoding from consideration from here on.
     """
-    def __init__(self, sm):
+
+    def __init__(self, sm: CodingStateMachineDict) -> None:
         self._model = sm
         self._curr_byte_pos = 0
         self._curr_char_len = 0
-        self._curr_state = None
+        self._curr_state = MachineState.START
+        self.active = True
         self.logger = logging.getLogger(__name__)
         self.reset()

-    def reset(self):
+    def reset(self) -> None:
         self._curr_state = MachineState.START

-    def next_state(self, c):
+    def next_state(self, c: int) -> int:
         # for each byte we get its class
         # if it is first byte, we also get byte length
-        byte_class = self._model['class_table'][c]
+        byte_class = self._model["class_table"][c]
         if self._curr_state == MachineState.START:
             self._curr_byte_pos = 0
-            self._curr_char_len = self._model['char_len_table'][byte_class]
+            self._curr_char_len = self._model["char_len_table"][byte_class]
         # from byte's class and state_table, we get its next state
-        curr_state = (self._curr_state * self._model['class_factor']
-                      + byte_class)
-        self._curr_state = self._model['state_table'][curr_state]
+        curr_state = self._curr_state * self._model["class_factor"] + byte_class
+        self._curr_state = self._model["state_table"][curr_state]
         self._curr_byte_pos += 1
         return self._curr_state

-    def get_current_charlen(self):
+    def get_current_charlen(self) -> int:
         return self._curr_char_len

-    def get_coding_state_machine(self):
-        return self._model['name']
+    def get_coding_state_machine(self) -> str:
+        return self._model["name"]

     @property
-    def language(self):
-        return self._model['language']
+    def language(self) -> str:
+        return self._model["language"]
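
In short, next_state() classifies each byte via class_table, then indexes state_table at curr_state * class_factor + byte_class. A minimal sketch of driving a machine by hand, using the HZ_SM_MODEL defined in escsm.py below (the input bytes are illustrative):

from chardet.codingstatemachine import CodingStateMachine
from chardet.enums import MachineState
from chardet.escsm import HZ_SM_MODEL

sm = CodingStateMachine(HZ_SM_MODEL)
for byte in b"~{AB~}":  # illustrative byte sequence; next_state() takes ints
    if sm.next_state(byte) == MachineState.ERROR:
        break  # the sequence cannot be HZ-GB-2312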

chardet/codingstatemachinedict.py (new file)

@@ -0,0 +1,19 @@
+from typing import TYPE_CHECKING, Tuple
+
+if TYPE_CHECKING:
+    # TypedDict was introduced in Python 3.8.
+    #
+    # TODO: Remove the else block and TYPE_CHECKING check when dropping support
+    # for Python 3.7.
+    from typing import TypedDict
+
+    class CodingStateMachineDict(TypedDict, total=False):
+        class_table: Tuple[int, ...]
+        class_factor: int
+        state_table: Tuple[int, ...]
+        char_len_table: Tuple[int, ...]
+        name: str
+        language: str  # Optional key
+
+else:
+    CodingStateMachineDict = dict

chardet/compat.py (deleted)

@@ -1,36 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-# Contributor(s):
-#   Dan Blanchard
-#   Ian Cordasco
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301  USA
-######################### END LICENSE BLOCK #########################
-
-import sys
-
-
-if sys.version_info < (3, 0):
-    PY2 = True
-    PY3 = False
-    string_types = (str, unicode)
-    text_type = unicode
-    iteritems = dict.iteritems
-else:
-    PY2 = False
-    PY3 = True
-    string_types = (bytes, str)
-    text_type = str
-    iteritems = dict.items

chardet/cp949prober.py

@@ -32,8 +32,8 @@ from .mbcssm import CP949_SM_MODEL

 class CP949Prober(MultiByteCharSetProber):
-    def __init__(self):
-        super(CP949Prober, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
         # NOTE: CP949 is a superset of EUC-KR, so the distribution should be
         # not different.
@@ -41,9 +41,9 @@ class CP949Prober(MultiByteCharSetProber):
         self.reset()

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "CP949"

     @property
-    def language(self):
+    def language(self) -> str:
         return "Korean"

chardet/enums.py

@@ -4,21 +4,26 @@ All of the Enums that are used throughout the chardet package.
 :author: Dan Blanchard (dan.blanchard@gmail.com)
 """

+from enum import Enum, Flag

-class InputState(object):
+
+class InputState:
     """
     This enum represents the different states a universal detector can be in.
     """
+
     PURE_ASCII = 0
     ESC_ASCII = 1
     HIGH_BYTE = 2


-class LanguageFilter(object):
+class LanguageFilter(Flag):
     """
     This enum represents the different language filters we can apply to a
     ``UniversalDetector``.
     """
+
+    NONE = 0x00
     CHINESE_SIMPLIFIED = 0x01
     CHINESE_TRADITIONAL = 0x02
     JAPANESE = 0x04
@@ -29,46 +34,50 @@ class LanguageFilter(object):
     CJK = CHINESE | JAPANESE | KOREAN


-class ProbingState(object):
+class ProbingState(Enum):
     """
     This enum represents the different states a prober can be in.
     """
+
     DETECTING = 0
     FOUND_IT = 1
     NOT_ME = 2


-class MachineState(object):
+class MachineState:
     """
     This enum represents the different states a state machine can be in.
     """
+
     START = 0
     ERROR = 1
     ITS_ME = 2


-class SequenceLikelihood(object):
+class SequenceLikelihood:
     """
     This enum represents the likelihood of a character following the previous one.
     """
+
     NEGATIVE = 0
     UNLIKELY = 1
     LIKELY = 2
     POSITIVE = 3

     @classmethod
-    def get_num_categories(cls):
+    def get_num_categories(cls) -> int:
         """:returns: The number of likelihood categories in the enum."""
         return 4


-class CharacterCategory(object):
+class CharacterCategory:
     """
     This enum represents the different categories language models for
     ``SingleByteCharsetProber`` put characters into.

     Anything less than CONTROL is considered a letter.
     """
+
     UNDEFINED = 255
     LINE_BREAK = 254
     SYMBOL = 253
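
Since LanguageFilter is now a Flag and ProbingState a true Enum, members compose and compare as flags rather than bare ints; a small sketch:

from chardet.enums import LanguageFilter, ProbingState

cjk = LanguageFilter.CHINESE | LanguageFilter.JAPANESE | LanguageFilter.KOREAN
assert cjk == LanguageFilter.CJK    # same composition as in the class body
assert cjk & LanguageFilter.KOREAN  # bitwise membership test
assert ProbingState.DETECTING != 0  # Enum members no longer equal plain ints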

chardet/escprober.py

@@ -25,11 +25,17 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################

+from typing import Optional, Union
+
 from .charsetprober import CharSetProber
 from .codingstatemachine import CodingStateMachine
-from .enums import LanguageFilter, ProbingState, MachineState
-from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
-                    ISO2022KR_SM_MODEL)
+from .enums import LanguageFilter, MachineState, ProbingState
+from .escsm import (
+    HZ_SM_MODEL,
+    ISO2022CN_SM_MODEL,
+    ISO2022JP_SM_MODEL,
+    ISO2022KR_SM_MODEL,
+)


 class EscCharSetProber(CharSetProber):
@@ -39,8 +45,8 @@ class EscCharSetProber(CharSetProber):
     identify these encodings.
     """

-    def __init__(self, lang_filter=None):
-        super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
+    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+        super().__init__(lang_filter=lang_filter)
         self.coding_sm = []
         if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
             self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
@@ -49,17 +55,15 @@ class EscCharSetProber(CharSetProber):
             self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
         if self.lang_filter & LanguageFilter.KOREAN:
             self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
-        self.active_sm_count = None
-        self._detected_charset = None
-        self._detected_language = None
-        self._state = None
+        self.active_sm_count = 0
+        self._detected_charset: Optional[str] = None
+        self._detected_language: Optional[str] = None
+        self._state = ProbingState.DETECTING
         self.reset()

-    def reset(self):
-        super(EscCharSetProber, self).reset()
+    def reset(self) -> None:
+        super().reset()
         for coding_sm in self.coding_sm:
-            if not coding_sm:
-                continue
             coding_sm.active = True
             coding_sm.reset()
         self.active_sm_count = len(self.coding_sm)
@@ -67,23 +71,20 @@ class EscCharSetProber(CharSetProber):
         self._detected_language = None

     @property
-    def charset_name(self):
+    def charset_name(self) -> Optional[str]:
         return self._detected_charset

     @property
-    def language(self):
+    def language(self) -> Optional[str]:
         return self._detected_language

-    def get_confidence(self):
-        if self._detected_charset:
-            return 0.99
-        else:
-            return 0.00
+    def get_confidence(self) -> float:
+        return 0.99 if self._detected_charset else 0.00

-    def feed(self, byte_str):
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
         for c in byte_str:
             for coding_sm in self.coding_sm:
-                if not coding_sm or not coding_sm.active:
+                if not coding_sm.active:
                     continue
                 coding_state = coding_sm.next_state(c)
                 if coding_state == MachineState.ERROR:

chardet/escsm.py

@@ -25,8 +25,10 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################

+from .codingstatemachinedict import CodingStateMachineDict
 from .enums import MachineState

+# fmt: off
 HZ_CLS = (
     1, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
     0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
@@ -70,16 +72,20 @@
     4, MachineState.ERROR, 4, 4, 4, MachineState.ERROR, 4, MachineState.ERROR,  # 20-27
     4, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START,  # 28-2f
 )
+# fmt: on

 HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)

-HZ_SM_MODEL = {'class_table': HZ_CLS,
-               'class_factor': 6,
-               'state_table': HZ_ST,
-               'char_len_table': HZ_CHAR_LEN_TABLE,
-               'name': "HZ-GB-2312",
-               'language': 'Chinese'}
+HZ_SM_MODEL: CodingStateMachineDict = {
+    "class_table": HZ_CLS,
+    "class_factor": 6,
+    "state_table": HZ_ST,
+    "char_len_table": HZ_CHAR_LEN_TABLE,
+    "name": "HZ-GB-2312",
+    "language": "Chinese",
+}

+# fmt: off
 ISO2022CN_CLS = (
     2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
     0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
@@ -125,16 +131,20 @@
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,  # 30-37
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START,  # 38-3f
 )
+# fmt: on

 ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)

-ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
-                      'class_factor': 9,
-                      'state_table': ISO2022CN_ST,
-                      'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
-                      'name': "ISO-2022-CN",
-                      'language': 'Chinese'}
+ISO2022CN_SM_MODEL: CodingStateMachineDict = {
+    "class_table": ISO2022CN_CLS,
+    "class_factor": 9,
+    "state_table": ISO2022CN_ST,
+    "char_len_table": ISO2022CN_CHAR_LEN_TABLE,
+    "name": "ISO-2022-CN",
+    "language": "Chinese",
+}

+# fmt: off
 ISO2022JP_CLS = (
     2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
     0, 0, 0, 0, 0, 0, 2, 2,  # 08 - 0f
@@ -181,16 +191,20 @@
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,  # 38-3f
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START, MachineState.START,  # 40-47
 )
+# fmt: on

 ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

-ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
-                      'class_factor': 10,
-                      'state_table': ISO2022JP_ST,
-                      'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
-                      'name': "ISO-2022-JP",
-                      'language': 'Japanese'}
+ISO2022JP_SM_MODEL: CodingStateMachineDict = {
+    "class_table": ISO2022JP_CLS,
+    "class_factor": 10,
+    "state_table": ISO2022JP_ST,
+    "char_len_table": ISO2022JP_CHAR_LEN_TABLE,
+    "name": "ISO-2022-JP",
+    "language": "Japanese",
+}

+# fmt: off
 ISO2022KR_CLS = (
     2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
     0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
@@ -233,14 +247,15 @@
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 5, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,  # 18-1f
     MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START,  # 20-27
 )
+# fmt: on

 ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)

-ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
-                      'class_factor': 6,
-                      'state_table': ISO2022KR_ST,
-                      'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
-                      'name': "ISO-2022-KR",
-                      'language': 'Korean'}
+ISO2022KR_SM_MODEL: CodingStateMachineDict = {
+    "class_table": ISO2022KR_CLS,
+    "class_factor": 6,
+    "state_table": ISO2022KR_ST,
+    "char_len_table": ISO2022KR_CHAR_LEN_TABLE,
+    "name": "ISO-2022-KR",
+    "language": "Korean",
+}

chardet/eucjpprober.py

@ -25,68 +25,78 @@
# 02110-1301 USA # 02110-1301 USA
######################### END LICENSE BLOCK ######################### ######################### END LICENSE BLOCK #########################
-from .enums import ProbingState, MachineState
-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
+from typing import Union
+
 from .chardistribution import EUCJPDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .enums import MachineState, ProbingState
 from .jpcntx import EUCJPContextAnalysis
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import EUCJP_SM_MODEL


 class EUCJPProber(MultiByteCharSetProber):
-    def __init__(self):
-        super(EUCJPProber, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
         self.distribution_analyzer = EUCJPDistributionAnalysis()
         self.context_analyzer = EUCJPContextAnalysis()
         self.reset()

-    def reset(self):
-        super(EUCJPProber, self).reset()
+    def reset(self) -> None:
+        super().reset()
         self.context_analyzer.reset()

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "EUC-JP"

     @property
-    def language(self):
+    def language(self) -> str:
         return "Japanese"

-    def feed(self, byte_str):
-        for i in range(len(byte_str)):
-            # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
-            coding_state = self.coding_sm.next_state(byte_str[i])
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+        assert self.coding_sm is not None
+        assert self.distribution_analyzer is not None
+
+        for i, byte in enumerate(byte_str):
+            # PY3K: byte_str is a byte array, so byte is an int, not a byte
+            coding_state = self.coding_sm.next_state(byte)
             if coding_state == MachineState.ERROR:
-                self.logger.debug('%s %s prober hit error at byte %s',
-                                  self.charset_name, self.language, i)
+                self.logger.debug(
+                    "%s %s prober hit error at byte %s",
+                    self.charset_name,
+                    self.language,
+                    i,
+                )
                 self._state = ProbingState.NOT_ME
                 break
-            elif coding_state == MachineState.ITS_ME:
+            if coding_state == MachineState.ITS_ME:
                 self._state = ProbingState.FOUND_IT
                 break
-            elif coding_state == MachineState.START:
+            if coding_state == MachineState.START:
                 char_len = self.coding_sm.get_current_charlen()
                 if i == 0:
-                    self._last_char[1] = byte_str[0]
+                    self._last_char[1] = byte
                     self.context_analyzer.feed(self._last_char, char_len)
                     self.distribution_analyzer.feed(self._last_char, char_len)
                 else:
-                    self.context_analyzer.feed(byte_str[i - 1:i + 1],
-                                               char_len)
-                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
-                                                    char_len)
+                    self.context_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)

         self._last_char[0] = byte_str[-1]

         if self.state == ProbingState.DETECTING:
-            if (self.context_analyzer.got_enough_data() and
-               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+            if self.context_analyzer.got_enough_data() and (
+                self.get_confidence() > self.SHORTCUT_THRESHOLD
+            ):
                 self._state = ProbingState.FOUND_IT

         return self.state

-    def get_confidence(self):
+    def get_confidence(self) -> float:
+        assert self.distribution_analyzer is not None
+
         context_conf = self.context_analyzer.get_confidence()
         distrib_conf = self.distribution_analyzer.get_confidence()
         return max(context_conf, distrib_conf)
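
All of the prober classes touched by this update share the same feed()/state/get_confidence() protocol, so the EUC-JP changes above can be exercised directly. A minimal usage sketch, assuming the vendored package is importable as chardet and using throwaway sample text:

from chardet.enums import ProbingState
from chardet.eucjpprober import EUCJPProber

prober = EUCJPProber()
state = prober.feed("こんにちは、世界".encode("euc_jp"))  # sample bytes, EUC-JP encoded
if state != ProbingState.NOT_ME:
    print(prober.charset_name, prober.get_confidence())
prober.reset()  # probers are reusable after reset()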

View file

@@ -43,6 +43,7 @@ EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
 EUCKR_TABLE_SIZE = 2352

 # Char to FreqOrder table ,
+# fmt: off
 EUCKR_CHAR_TO_FREQ_ORDER = (
   13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722,  87,
 1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
@@ -192,4 +193,4 @@ EUCKR_CHAR_TO_FREQ_ORDER = (
 2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
  670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642,  # 512, 256
 )
+# fmt: on
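
The "# fmt: off" / "# fmt: on" pairs added throughout this update only keep the code formatter from reflowing these generated tables; the data is unchanged. For context, the table maps each character to its frequency rank, and the distribution analyzer counts how many observed characters land in the common band before comparing against EUCKR_TYPICAL_DISTRIBUTION_RATIO. A hedged sketch of that calculation (simplified from chardet's CharDistributionAnalysis; the 512 cutoff mirrors its common-character band):

def distribution_confidence(freq_chars: int, total_chars: int, typical_ratio: float) -> float:
    # freq_chars: characters whose frequency rank fell below 512 (the "common" band)
    if total_chars <= 0 or freq_chars <= 3:  # too little data to judge
        return 0.01
    if total_chars != freq_chars:
        r = freq_chars / ((total_chars - freq_chars) * typical_ratio)
        if r < 0.99:
            return r
    return 0.99

For example, 480 common characters out of 500 with ratio 6.0 gives 480 / (20 * 6.0) = 4.0, which saturates at the 0.99 ceiling.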

View file

@@ -25,23 +25,23 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
 from .chardistribution import EUCKRDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import EUCKR_SM_MODEL


 class EUCKRProber(MultiByteCharSetProber):
-    def __init__(self):
-        super(EUCKRProber, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
         self.distribution_analyzer = EUCKRDistributionAnalysis()
         self.reset()

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "EUC-KR"

     @property
-    def language(self):
+    def language(self) -> str:
         return "Korean"

View file

@@ -43,9 +43,10 @@
 EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75

-# Char to FreqOrder table ,
+# Char to FreqOrder table
 EUCTW_TABLE_SIZE = 5376

+# fmt: off
 EUCTW_CHAR_TO_FREQ_ORDER = (
     1, 1800, 1506, 255, 1431, 198, 9, 82, 6, 7310, 177, 202, 3615, 1256, 2808, 110,  # 2742
  3735, 33, 3241, 261, 76, 44, 2113, 16, 2931, 2184, 1176, 659, 3868, 26, 3404, 2643,  # 2758
@@ -384,4 +385,4 @@ EUCTW_CHAR_TO_FREQ_ORDER = (
  890, 3614, 3864, 8110, 1877, 3732, 3402, 8111, 2183, 2353, 3403, 1652, 8112, 8113, 8114, 941,  # 8086
 2294, 208, 3499, 4057, 2019, 330, 4294, 3865, 2892, 2492, 3733, 4295, 8115, 8116, 8117, 8118,  # 8102
 )
+# fmt: on

View file

@ -25,22 +25,23 @@
# 02110-1301 USA # 02110-1301 USA
######################### END LICENSE BLOCK ######################### ######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis from .chardistribution import EUCTWDistributionAnalysis
from .codingstatemachine import CodingStateMachine
from .mbcharsetprober import MultiByteCharSetProber
from .mbcssm import EUCTW_SM_MODEL from .mbcssm import EUCTW_SM_MODEL
class EUCTWProber(MultiByteCharSetProber): class EUCTWProber(MultiByteCharSetProber):
def __init__(self): def __init__(self) -> None:
super(EUCTWProber, self).__init__() super().__init__()
self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL) self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
self.distribution_analyzer = EUCTWDistributionAnalysis() self.distribution_analyzer = EUCTWDistributionAnalysis()
self.reset() self.reset()
@property @property
def charset_name(self): def charset_name(self) -> str:
return "EUC-TW" return "EUC-TW"
@property @property
def language(self): def language(self) -> str:
return "Taiwan" return "Taiwan"

View file

@@ -43,6 +43,7 @@ GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
 GB2312_TABLE_SIZE = 3760

+# fmt: off
 GB2312_CHAR_TO_FREQ_ORDER = (
 1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
 2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
@@ -280,4 +281,4 @@ GB2312_CHAR_TO_FREQ_ORDER = (
  381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
  852,1221,1400,1486, 882,2299,4036, 351,  28,1122, 700,6479,6480,6481,6482,6483,  #last 512
 )
+# fmt: on

View file

@@ -25,22 +25,23 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
 from .chardistribution import GB2312DistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import GB2312_SM_MODEL


 class GB2312Prober(MultiByteCharSetProber):
-    def __init__(self):
-        super(GB2312Prober, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
         self.distribution_analyzer = GB2312DistributionAnalysis()
         self.reset()

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "GB2312"

     @property
-    def language(self):
+    def language(self) -> str:
         return "Chinese"

View file

@@ -25,8 +25,11 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

+from typing import Optional, Union
+
 from .charsetprober import CharSetProber
 from .enums import ProbingState
+from .sbcharsetprober import SingleByteCharSetProber

 # This prober doesn't actually recognize a language or a charset.
 # It is a helper prober for the use of the Hebrew model probers
@@ -125,18 +128,20 @@
 # model probers scores. The answer is returned in the form of the name of the
 # charset identified, either "windows-1255" or "ISO-8859-8".


 class HebrewProber(CharSetProber):
+    SPACE = 0x20
     # windows-1255 / ISO-8859-8 code points of interest
-    FINAL_KAF = 0xea
-    NORMAL_KAF = 0xeb
-    FINAL_MEM = 0xed
-    NORMAL_MEM = 0xee
-    FINAL_NUN = 0xef
-    NORMAL_NUN = 0xf0
-    FINAL_PE = 0xf3
-    NORMAL_PE = 0xf4
-    FINAL_TSADI = 0xf5
-    NORMAL_TSADI = 0xf6
+    FINAL_KAF = 0xEA
+    NORMAL_KAF = 0xEB
+    FINAL_MEM = 0xED
+    NORMAL_MEM = 0xEE
+    FINAL_NUN = 0xEF
+    NORMAL_NUN = 0xF0
+    FINAL_PE = 0xF3
+    NORMAL_PE = 0xF4
+    FINAL_TSADI = 0xF5
+    NORMAL_TSADI = 0xF6

     # Minimum Visual vs Logical final letter score difference.
     # If the difference is below this, don't rely solely on the final letter score
@@ -151,35 +156,44 @@ class HebrewProber(CharSetProber):
     VISUAL_HEBREW_NAME = "ISO-8859-8"
     LOGICAL_HEBREW_NAME = "windows-1255"

-    def __init__(self):
-        super(HebrewProber, self).__init__()
-        self._final_char_logical_score = None
-        self._final_char_visual_score = None
-        self._prev = None
-        self._before_prev = None
-        self._logical_prober = None
-        self._visual_prober = None
+    def __init__(self) -> None:
+        super().__init__()
+        self._final_char_logical_score = 0
+        self._final_char_visual_score = 0
+        self._prev = self.SPACE
+        self._before_prev = self.SPACE
+        self._logical_prober: Optional[SingleByteCharSetProber] = None
+        self._visual_prober: Optional[SingleByteCharSetProber] = None
         self.reset()

-    def reset(self):
+    def reset(self) -> None:
         self._final_char_logical_score = 0
         self._final_char_visual_score = 0
         # The two last characters seen in the previous buffer,
         # mPrev and mBeforePrev are initialized to space in order to simulate
         # a word delimiter at the beginning of the data
-        self._prev = ' '
-        self._before_prev = ' '
+        self._prev = self.SPACE
+        self._before_prev = self.SPACE
         # These probers are owned by the group prober.

-    def set_model_probers(self, logicalProber, visualProber):
-        self._logical_prober = logicalProber
-        self._visual_prober = visualProber
+    def set_model_probers(
+        self,
+        logical_prober: SingleByteCharSetProber,
+        visual_prober: SingleByteCharSetProber,
+    ) -> None:
+        self._logical_prober = logical_prober
+        self._visual_prober = visual_prober

-    def is_final(self, c):
-        return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN,
-                     self.FINAL_PE, self.FINAL_TSADI]
+    def is_final(self, c: int) -> bool:
+        return c in [
+            self.FINAL_KAF,
+            self.FINAL_MEM,
+            self.FINAL_NUN,
+            self.FINAL_PE,
+            self.FINAL_TSADI,
+        ]

-    def is_non_final(self, c):
+    def is_non_final(self, c: int) -> bool:
         # The normal Tsadi is not a good Non-Final letter due to words like
         # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
         # apostrophe is converted to a space in FilterWithoutEnglishLetters
@@ -190,10 +204,9 @@ class HebrewProber(CharSetProber):
         # for example legally end with a Non-Final Pe or Kaf. However, the
         # benefit of these letters as Non-Final letters outweighs the damage
         # since these words are quite rare.
-        return c in [self.NORMAL_KAF, self.NORMAL_MEM,
-                     self.NORMAL_NUN, self.NORMAL_PE]
+        return c in [self.NORMAL_KAF, self.NORMAL_MEM, self.NORMAL_NUN, self.NORMAL_PE]

-    def feed(self, byte_str):
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
         # Final letter analysis for logical-visual decision.
         # Look for evidence that the received buffer is either logical Hebrew
         # or visual Hebrew.
@@ -227,9 +240,9 @@ class HebrewProber(CharSetProber):
         byte_str = self.filter_high_byte_only(byte_str)

         for cur in byte_str:
-            if cur == ' ':
+            if cur == self.SPACE:
                 # We stand on a space - a word just ended
-                if self._before_prev != ' ':
+                if self._before_prev != self.SPACE:
                     # next-to-last char was not a space so self._prev is not a
                     # 1 letter word
                     if self.is_final(self._prev):
@@ -241,8 +254,11 @@ class HebrewProber(CharSetProber):
                         self._final_char_visual_score += 1
             else:
                 # Not standing on a space
-                if ((self._before_prev == ' ') and
-                        (self.is_final(self._prev)) and (cur != ' ')):
+                if (
+                    (self._before_prev == self.SPACE)
+                    and (self.is_final(self._prev))
+                    and (cur != self.SPACE)
+                ):
                     # case (3) [-2:space][-1:final letter][cur:not space]
                     self._final_char_visual_score += 1
             self._before_prev = self._prev
@@ -253,7 +269,10 @@ class HebrewProber(CharSetProber):
         return ProbingState.DETECTING

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
+        assert self._logical_prober is not None
+        assert self._visual_prober is not None
+
         # Make the decision: is it Logical or Visual?
         # If the final letter score distance is dominant enough, rely on it.
         finalsub = self._final_char_logical_score - self._final_char_visual_score
@@ -263,8 +282,9 @@ class HebrewProber(CharSetProber):
             return self.VISUAL_HEBREW_NAME

         # It's not dominant enough, try to rely on the model scores instead.
-        modelsub = (self._logical_prober.get_confidence()
-                    - self._visual_prober.get_confidence())
+        modelsub = (
+            self._logical_prober.get_confidence() - self._visual_prober.get_confidence()
+        )
         if modelsub > self.MIN_MODEL_DISTANCE:
             return self.LOGICAL_HEBREW_NAME
         if modelsub < -self.MIN_MODEL_DISTANCE:
@@ -280,13 +300,17 @@ class HebrewProber(CharSetProber):
         return self.LOGICAL_HEBREW_NAME

     @property
-    def language(self):
-        return 'Hebrew'
+    def language(self) -> str:
+        return "Hebrew"

     @property
-    def state(self):
+    def state(self) -> ProbingState:
+        assert self._logical_prober is not None
+        assert self._visual_prober is not None
+
         # Remain active as long as any of the model probers are active.
-        if (self._logical_prober.state == ProbingState.NOT_ME) and \
-                (self._visual_prober.state == ProbingState.NOT_ME):
+        if (self._logical_prober.state == ProbingState.NOT_ME) and (
+            self._visual_prober.state == ProbingState.NOT_ME
+        ):
             return ProbingState.NOT_ME

         return ProbingState.DETECTING
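
The decision rule the new SPACE constant feeds into is easy to state: Hebrew final letters belong at the end of a word, so word-final finals are evidence for logical order (windows-1255), while word-initial finals indicate visually reversed storage (ISO-8859-8). A hedged distillation (the constant 5 mirrors MIN_FINAL_CHAR_DISTANCE; the real prober falls back to the model probers' confidences and ultimately defaults to logical order):

def hebrew_order(logical_score: int, visual_score: int, min_distance: int = 5):
    score = logical_score - visual_score  # finals seen at word end vs word start
    if score >= min_distance:
        return "windows-1255"             # logical Hebrew
    if score <= -min_distance:
        return "ISO-8859-8"               # visual Hebrew
    return None                           # not dominant: defer to the model probers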

View file

@@ -46,6 +46,7 @@ JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
 # Char to FreqOrder table ,
 JIS_TABLE_SIZE = 4368

+# fmt: off
 JIS_CHAR_TO_FREQ_ORDER = (
   40,   1,   6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510,  # 16
 3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247,  18, 179,5071, 856,1661,  # 32
@@ -321,5 +322,4 @@ JIS_CHAR_TO_FREQ_ORDER = (
 1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767,  # 4352
 2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137,  # 4368 #last 512
 )
+# fmt: on

lib/chardet/johabfreq.py: new file, 2382 additions

File diff suppressed because it is too large.

View file

@@ -0,0 +1,47 @@
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .chardistribution import JOHABDistributionAnalysis
from .codingstatemachine import CodingStateMachine
from .mbcharsetprober import MultiByteCharSetProber
from .mbcssm import JOHAB_SM_MODEL
class JOHABProber(MultiByteCharSetProber):
def __init__(self) -> None:
super().__init__()
self.coding_sm = CodingStateMachine(JOHAB_SM_MODEL)
self.distribution_analyzer = JOHABDistributionAnalysis()
self.reset()
@property
def charset_name(self) -> str:
return "Johab"
@property
def language(self) -> str:
return "Korean"

View file

@@ -25,9 +25,11 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

+from typing import List, Tuple, Union
+
 # This is hiragana 2-char sequence table, the number in each cell represents its frequency category
-jp2CharContext = (
+# fmt: off
+jp2_char_context = (
 (0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1),
 (2, 4, 0, 4, 0, 3, 0, 4, 0, 3, 4, 4, 4, 2, 4, 3, 3, 4, 3, 2, 3, 3, 4, 2, 3, 3, 3, 2, 4, 1, 4, 3, 3, 1, 5, 4, 3, 4, 3, 4, 3, 5, 3, 0, 3, 5, 4, 2, 0, 3, 1, 0, 3, 3, 0, 3, 3, 0, 1, 1, 0, 4, 3, 0, 3, 3, 0, 4, 0, 2, 0, 3, 5, 5, 5, 5, 4, 0, 4, 1, 0, 3, 4),
 (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2),
@@ -112,23 +114,25 @@ jp2CharContext = (
 (0, 4, 0, 4, 0, 4, 0, 3, 0, 4, 4, 3, 4, 2, 4, 3, 2, 0, 4, 4, 4, 3, 5, 3, 5, 3, 3, 2, 4, 2, 4, 3, 4, 3, 1, 4, 0, 2, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4, 3, 4, 1, 3, 4, 3, 2, 1, 2, 1, 3, 3, 3, 4, 4, 3, 3, 5, 0, 4, 0, 3, 0, 4, 3, 3, 3, 2, 1, 0, 3, 0, 0, 3, 3),
 (0, 4, 0, 3, 0, 3, 0, 3, 0, 3, 5, 5, 3, 3, 3, 3, 4, 3, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3, 4, 3, 5, 3, 3, 1, 3, 2, 4, 5, 5, 5, 5, 4, 3, 4, 5, 5, 3, 2, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 4, 3, 2, 2, 1, 2, 0, 3, 0, 0, 4, 1),
 )
+# fmt: on


-class JapaneseContextAnalysis(object):
+class JapaneseContextAnalysis:
     NUM_OF_CATEGORY = 6
     DONT_KNOW = -1
     ENOUGH_REL_THRESHOLD = 100
     MAX_REL_THRESHOLD = 1000
     MINIMUM_DATA_THRESHOLD = 4

-    def __init__(self):
-        self._total_rel = None
-        self._rel_sample = None
-        self._need_to_skip_char_num = None
-        self._last_char_order = None
-        self._done = None
+    def __init__(self) -> None:
+        self._total_rel = 0
+        self._rel_sample: List[int] = []
+        self._need_to_skip_char_num = 0
+        self._last_char_order = -1
+        self._done = False
         self.reset()

-    def reset(self):
+    def reset(self) -> None:
         self._total_rel = 0  # total sequence received
         # category counters, each integer counts sequence in its category
         self._rel_sample = [0] * self.NUM_OF_CATEGORY
@@ -140,7 +144,7 @@ class JapaneseContextAnalysis(object):
         # been made
         self._done = False

-    def feed(self, byte_str, num_bytes):
+    def feed(self, byte_str: Union[bytes, bytearray], num_bytes: int) -> None:
         if self._done:
             return

@@ -164,32 +168,34 @@ class JapaneseContextAnalysis(object):
                 if self._total_rel > self.MAX_REL_THRESHOLD:
                     self._done = True
                     break
-                self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
+                self._rel_sample[
+                    jp2_char_context[self._last_char_order][order]
+                ] += 1
             self._last_char_order = order

-    def got_enough_data(self):
+    def got_enough_data(self) -> bool:
         return self._total_rel > self.ENOUGH_REL_THRESHOLD

-    def get_confidence(self):
+    def get_confidence(self) -> float:
         # This is just one way to calculate confidence. It works well for me.
         if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
             return (self._total_rel - self._rel_sample[0]) / self._total_rel
-        else:
-            return self.DONT_KNOW
+        return self.DONT_KNOW

-    def get_order(self, byte_str):
+    def get_order(self, _: Union[bytes, bytearray]) -> Tuple[int, int]:
         return -1, 1


 class SJISContextAnalysis(JapaneseContextAnalysis):
-    def __init__(self):
-        super(SJISContextAnalysis, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self._charset_name = "SHIFT_JIS"

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return self._charset_name

-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]:
         if not byte_str:
             return -1, 1
         # find out current char's byte length
@@ -209,8 +215,9 @@ class SJISContextAnalysis(JapaneseContextAnalysis):
         return -1, char_len


 class EUCJPContextAnalysis(JapaneseContextAnalysis):
-    def get_order(self, byte_str):
+    def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]:
         if not byte_str:
             return -1, 1
         # find out current char's byte length
@@ -229,5 +236,3 @@ class EUCJPContextAnalysis(JapaneseContextAnalysis):
             return second_char - 0xA1, char_len

         return -1, char_len
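
The renamed jp2_char_context table is what get_confidence() above summarizes: rel_sample[0] counts hiragana bigrams that carried no evidence, so the confidence is simply the informative fraction. A worked instance with illustrative numbers only:

total_rel = 250        # hiragana bigrams observed so far
rel_sample_0 = 40      # bigrams in category 0 (uninformative)
confidence = (total_rel - rel_sample_0) / total_rel
print(confidence)      # 0.84: most pairs matched known Japanese context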

View file

@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from chardet.sbcharsetprober import SingleByteCharSetModel

-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4373,13 +4369,15 @@ ISO_8859_5_BULGARIAN_CHAR_TO_ORDER = {
     255: 253,  # 'џ'
 }

-ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
-                                                    language='Bulgairan',
-                                                    char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER,
-                                                    language_model=BULGARIAN_LANG_MODEL,
-                                                    typical_positive_ratio=0.969392,
-                                                    keep_ascii_letters=False,
-                                                    alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя')
+ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(
+    charset_name="ISO-8859-5",
+    language="Bulgarian",
+    char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER,
+    language_model=BULGARIAN_LANG_MODEL,
+    typical_positive_ratio=0.969392,
+    keep_ascii_letters=False,
+    alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
+)

 WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER = {
     0: 255,  # '\x00'
@@ -4640,11 +4638,12 @@ WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER = {
     255: 16,  # 'я'
 }

-WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
-                                                      language='Bulgarian',
-                                                      char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER,
-                                                      language_model=BULGARIAN_LANG_MODEL,
-                                                      typical_positive_ratio=0.969392,
-                                                      keep_ascii_letters=False,
-                                                      alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя')
+WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(
+    charset_name="windows-1251",
+    language="Bulgarian",
+    char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER,
+    language_model=BULGARIAN_LANG_MODEL,
+    typical_positive_ratio=0.969392,
+    keep_ascii_letters=False,
+    alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
+)
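
Each of these generated model files (this one also fixes the old 'Bulgairan' language typo) only packages data; SingleByteCharSetProber does the work by mapping bytes through char_to_order_map and rating consecutive order pairs with language_model. A hedged sketch of that inner loop (simplified; the real prober also tracks totals for its confidence ratio, and 64 mirrors its SAMPLE_SIZE):

def count_likely_pairs(data: bytes, model) -> int:
    likely = 0
    prev_order = 255                                   # sentinel: nothing seen yet
    for byte in data:
        order = model.char_to_order_map.get(byte, 255)
        if prev_order < 64 and order < 64:             # both chars are in the modeled set
            # language_model holds likelihood classes 0-3 for each order pair
            likely += 1 if model.language_model[prev_order][order] >= 2 else 0
        prev_order = order
    return likely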

View file

@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from chardet.sbcharsetprober import SingleByteCharSetModel

-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4121,13 +4117,15 @@ WINDOWS_1253_GREEK_CHAR_TO_ORDER = {
     255: 253,  # None
 }

-WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(charset_name='windows-1253',
-                                                  language='Greek',
-                                                  char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER,
-                                                  language_model=GREEK_LANG_MODEL,
-                                                  typical_positive_ratio=0.982851,
-                                                  keep_ascii_letters=False,
-                                                  alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ')
+WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(
+    charset_name="windows-1253",
+    language="Greek",
+    char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER,
+    language_model=GREEK_LANG_MODEL,
+    typical_positive_ratio=0.982851,
+    keep_ascii_letters=False,
+    alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ",
+)

 ISO_8859_7_GREEK_CHAR_TO_ORDER = {
     0: 255,  # '\x00'
@@ -4388,11 +4386,12 @@ ISO_8859_7_GREEK_CHAR_TO_ORDER = {
     255: 253,  # None
 }

-ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-7',
-                                                language='Greek',
-                                                char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER,
-                                                language_model=GREEK_LANG_MODEL,
-                                                typical_positive_ratio=0.982851,
-                                                keep_ascii_letters=False,
-                                                alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ')
+ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(
+    charset_name="ISO-8859-7",
+    language="Greek",
+    char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER,
+    language_model=GREEK_LANG_MODEL,
+    typical_positive_ratio=0.982851,
+    keep_ascii_letters=False,
+    alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ",
+)

View file

@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from chardet.sbcharsetprober import SingleByteCharSetModel

-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4373,11 +4369,12 @@ WINDOWS_1255_HEBREW_CHAR_TO_ORDER = {
     255: 253,  # None
 }

-WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(charset_name='windows-1255',
-                                                   language='Hebrew',
-                                                   char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER,
-                                                   language_model=HEBREW_LANG_MODEL,
-                                                   typical_positive_ratio=0.984004,
-                                                   keep_ascii_letters=False,
-                                                   alphabet='אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ')
+WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(
+    charset_name="windows-1255",
+    language="Hebrew",
+    char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER,
+    language_model=HEBREW_LANG_MODEL,
+    typical_positive_ratio=0.984004,
+    keep_ascii_letters=False,
+    alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
+)

View file

@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from chardet.sbcharsetprober import SingleByteCharSetModel

-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4373,13 +4369,15 @@ WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER = {
     255: 253,  # '˙'
 }

-WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1250',
-                                                      language='Hungarian',
-                                                      char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER,
-                                                      language_model=HUNGARIAN_LANG_MODEL,
-                                                      typical_positive_ratio=0.947368,
-                                                      keep_ascii_letters=True,
-                                                      alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű')
+WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(
+    charset_name="windows-1250",
+    language="Hungarian",
+    char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER,
+    language_model=HUNGARIAN_LANG_MODEL,
+    typical_positive_ratio=0.947368,
+    keep_ascii_letters=True,
+    alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű",
+)

 ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER = {
     0: 255,  # '\x00'
@@ -4640,11 +4638,12 @@ ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER = {
     255: 253,  # '˙'
 }

-ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-2',
-                                                    language='Hungarian',
-                                                    char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER,
-                                                    language_model=HUNGARIAN_LANG_MODEL,
-                                                    typical_positive_ratio=0.947368,
-                                                    keep_ascii_letters=True,
-                                                    alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű')
+ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(
+    charset_name="ISO-8859-2",
+    language="Hungarian",
+    char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER,
+    language_model=HUNGARIAN_LANG_MODEL,
+    typical_positive_ratio=0.947368,
+    keep_ascii_letters=True,
+    alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű",
+)

View file

@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from chardet.sbcharsetprober import SingleByteCharSetModel

-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4373,13 +4369,15 @@ IBM866_RUSSIAN_CHAR_TO_ORDER = {
     255: 255,  # '\xa0'
 }

-IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM866',
-                                              language='Russian',
-                                              char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
-                                              language_model=RUSSIAN_LANG_MODEL,
-                                              typical_positive_ratio=0.976601,
-                                              keep_ascii_letters=False,
-                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="IBM866",
+    language="Russian",
+    char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

 WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = {
     0: 255,  # '\x00'
@@ -4640,13 +4638,15 @@ WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = {
     255: 16,  # 'я'
 }

-WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
-                                                    language='Russian',
-                                                    char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
-                                                    language_model=RUSSIAN_LANG_MODEL,
-                                                    typical_positive_ratio=0.976601,
-                                                    keep_ascii_letters=False,
-                                                    alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="windows-1251",
+    language="Russian",
+    char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

 IBM855_RUSSIAN_CHAR_TO_ORDER = {
     0: 255,  # '\x00'
@@ -4907,13 +4907,15 @@ IBM855_RUSSIAN_CHAR_TO_ORDER = {
     255: 255,  # '\xa0'
 }

-IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM855',
-                                              language='Russian',
-                                              char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
-                                              language_model=RUSSIAN_LANG_MODEL,
-                                              typical_positive_ratio=0.976601,
-                                              keep_ascii_letters=False,
-                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="IBM855",
+    language="Russian",
+    char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

 KOI8_R_RUSSIAN_CHAR_TO_ORDER = {
     0: 255,  # '\x00'
@@ -5174,13 +5176,15 @@ KOI8_R_RUSSIAN_CHAR_TO_ORDER = {
     255: 70,  # 'Ъ'
 }

-KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='KOI8-R',
-                                              language='Russian',
-                                              char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
-                                              language_model=RUSSIAN_LANG_MODEL,
-                                              typical_positive_ratio=0.976601,
-                                              keep_ascii_letters=False,
-                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="KOI8-R",
+    language="Russian",
+    char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

 MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = {
     0: 255,  # '\x00'
@@ -5441,13 +5445,15 @@ MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = {
     255: 255,  # '€'
 }

-MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='MacCyrillic',
-                                                   language='Russian',
-                                                   char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
-                                                   language_model=RUSSIAN_LANG_MODEL,
-                                                   typical_positive_ratio=0.976601,
-                                                   keep_ascii_letters=False,
-                                                   alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="MacCyrillic",
+    language="Russian",
+    char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

 ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = {
     0: 255,  # '\x00'
@@ -5708,11 +5714,12 @@ ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = {
     255: 255,  # 'џ'
 }

-ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
-                                                  language='Russian',
-                                                  char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
-                                                  language_model=RUSSIAN_LANG_MODEL,
-                                                  typical_positive_ratio=0.976601,
-                                                  keep_ascii_letters=False,
-                                                  alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="ISO-8859-5",
+    language="Russian",
+    char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

View file

@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from chardet.sbcharsetprober import SingleByteCharSetModel

-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4373,11 +4369,12 @@ TIS_620_THAI_CHAR_TO_ORDER = {
     255: 253,  # None
 }

-TIS_620_THAI_MODEL = SingleByteCharSetModel(charset_name='TIS-620',
-                                            language='Thai',
-                                            char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER,
-                                            language_model=THAI_LANG_MODEL,
-                                            typical_positive_ratio=0.926386,
-                                            keep_ascii_letters=False,
-                                            alphabet='กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛')
+TIS_620_THAI_MODEL = SingleByteCharSetModel(
+    charset_name="TIS-620",
+    language="Thai",
+    char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER,
+    language_model=THAI_LANG_MODEL,
+    typical_positive_ratio=0.926386,
+    keep_ascii_letters=False,
+    alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
+)

View file

@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from chardet.sbcharsetprober import SingleByteCharSetModel

-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4373,11 +4369,12 @@ ISO_8859_9_TURKISH_CHAR_TO_ORDER = {
     255: 107,  # 'ÿ'
 }

-ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-9',
-                                                  language='Turkish',
-                                                  char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER,
-                                                  language_model=TURKISH_LANG_MODEL,
-                                                  typical_positive_ratio=0.97029,
-                                                  keep_ascii_letters=True,
-                                                  alphabet='ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş')
+ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(
+    charset_name="ISO-8859-9",
+    language="Turkish",
+    char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER,
+    language_model=TURKISH_LANG_MODEL,
+    typical_positive_ratio=0.97029,
+    keep_ascii_letters=True,
+    alphabet="ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş",
+)

View file

@@ -26,6 +26,8 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

+from typing import List, Union
+
 from .charsetprober import CharSetProber
 from .enums import ProbingState

@@ -41,6 +43,7 @@ ASV = 6  # accent small vowel
 ASO = 7  # accent small other
 CLASS_NUM = 8  # total classes

+# fmt: off
 Latin1_CharToClass = (
     OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 00 - 07
     OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 08 - 0F
@@ -91,34 +94,34 @@ Latin1ClassModel = (
     0, 3, 1, 3, 1, 1, 1, 3,  # ASV
     0, 3, 1, 3, 1, 1, 3, 3,  # ASO
 )
+# fmt: on


 class Latin1Prober(CharSetProber):
-    def __init__(self):
-        super(Latin1Prober, self).__init__()
-        self._last_char_class = None
-        self._freq_counter = None
+    def __init__(self) -> None:
+        super().__init__()
+        self._last_char_class = OTH
+        self._freq_counter: List[int] = []
         self.reset()

-    def reset(self):
+    def reset(self) -> None:
         self._last_char_class = OTH
         self._freq_counter = [0] * FREQ_CAT_NUM
-        CharSetProber.reset(self)
+        super().reset()

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "ISO-8859-1"

     @property
-    def language(self):
+    def language(self) -> str:
         return ""

-    def feed(self, byte_str):
-        byte_str = self.filter_with_english_letters(byte_str)
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+        byte_str = self.remove_xml_tags(byte_str)
         for c in byte_str:
             char_class = Latin1_CharToClass[c]
-            freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM)
-                                    + char_class]
+            freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + char_class]
             if freq == 0:
                 self._state = ProbingState.NOT_ME
                 break
@@ -127,19 +130,18 @@ class Latin1Prober(CharSetProber):

         return self.state

-    def get_confidence(self):
+    def get_confidence(self) -> float:
         if self.state == ProbingState.NOT_ME:
             return 0.01

         total = sum(self._freq_counter)
-        if total < 0.01:
-            confidence = 0.0
-        else:
-            confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0)
-                          / total)
-        if confidence < 0.0:
-            confidence = 0.0
+        confidence = (
+            0.0
+            if total < 0.01
+            else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
+        )
+        confidence = max(confidence, 0.0)
         # lower the confidence of latin1 so that other more accurate
         # detector can take priority.
-        confidence = confidence * 0.73
+        confidence *= 0.73
         return confidence
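
The counters make the scaling concrete: bucket 3 holds "very likely" transitions and bucket 1 "very unlikely" ones, so one unlikely pair cancels twenty likely ones before the 0.73 damping. A worked instance with illustrative numbers only:

freq_counter = [0, 2, 150, 348]  # buckets: illegal/unused, unlikely, normal, likely
total = sum(freq_counter)        # 500
confidence = (freq_counter[3] - freq_counter[1] * 20.0) / total  # (348 - 40) / 500 = 0.616
confidence = max(confidence, 0.0) * 0.73                         # approx. 0.45 after damping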

View file

@@ -0,0 +1,162 @@
######################## BEGIN LICENSE BLOCK ########################
# This code was modified from latin1prober.py by Rob Speer <rob@lumino.so>.
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Rob Speer - adapt to MacRoman encoding
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from typing import List, Union
from .charsetprober import CharSetProber
from .enums import ProbingState
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
ODD = 8 # character that is unlikely to appear
CLASS_NUM = 9 # total classes
# The change from Latin1 is that we explicitly look for extended characters
# that are infrequently-occurring symbols, and consider them to always be
# improbable. This should let MacRoman get out of the way of more likely
# encodings in most situations.
# fmt: off
MacRoman_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
ACV, ACV, ACO, ACV, ACO, ACV, ACV, ASV, # 80 - 87
ASV, ASV, ASV, ASV, ASV, ASO, ASV, ASV, # 88 - 8F
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASV, # 90 - 97
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, ASO, # A0 - A7
OTH, OTH, ODD, ODD, OTH, OTH, ACV, ACV, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, ASV, ASV, # B8 - BF
OTH, OTH, ODD, OTH, ODD, OTH, OTH, OTH, # C0 - C7
OTH, OTH, OTH, ACV, ACV, ACV, ACV, ASV, # C8 - CF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, ODD, # D0 - D7
ASV, ACV, ODD, OTH, OTH, OTH, OTH, OTH, # D8 - DF
OTH, OTH, OTH, OTH, OTH, ACV, ACV, ACV, # E0 - E7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # E8 - EF
ODD, ACV, ACV, ACV, ACV, ASV, ODD, ODD, # F0 - F7
ODD, ODD, ODD, ODD, ODD, ODD, ODD, ODD, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
MacRomanClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO ODD
0, 0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, 1, # OTH
0, 3, 3, 3, 3, 3, 3, 3, 1, # ASC
0, 3, 3, 3, 1, 1, 3, 3, 1, # ASS
0, 3, 3, 3, 1, 2, 1, 2, 1, # ACV
0, 3, 3, 3, 3, 3, 3, 3, 1, # ACO
0, 3, 1, 3, 1, 1, 1, 3, 1, # ASV
0, 3, 1, 3, 1, 1, 3, 3, 1, # ASO
0, 1, 1, 1, 1, 1, 1, 1, 1, # ODD
)
# fmt: on
class MacRomanProber(CharSetProber):
def __init__(self) -> None:
super().__init__()
self._last_char_class = OTH
self._freq_counter: List[int] = []
self.reset()
def reset(self) -> None:
self._last_char_class = OTH
self._freq_counter = [0] * FREQ_CAT_NUM
# express the prior that MacRoman is a somewhat rare encoding;
# this can be done by starting out in a slightly improbable state
# that must be overcome
self._freq_counter[2] = 10
super().reset()
@property
def charset_name(self) -> str:
return "MacRoman"
@property
def language(self) -> str:
return ""
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
byte_str = self.remove_xml_tags(byte_str)
for c in byte_str:
char_class = MacRoman_CharToClass[c]
freq = MacRomanClassModel[(self._last_char_class * CLASS_NUM) + char_class]
if freq == 0:
self._state = ProbingState.NOT_ME
break
self._freq_counter[freq] += 1
self._last_char_class = char_class
return self.state
def get_confidence(self) -> float:
if self.state == ProbingState.NOT_ME:
return 0.01
total = sum(self._freq_counter)
confidence = (
0.0
if total < 0.01
else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
)
confidence = max(confidence, 0.0)
# lower the confidence of MacRoman so that other more accurate
# detector can take priority.
confidence *= 0.73
return confidence
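
The reset() prior above deserves a note: seeding _freq_counter[2] with 10 phantom "normal" transitions makes MacRoman start from a deliberately weak position, because those counts inflate the confidence denominator without adding anything to the "very likely" bucket. Illustrative arithmetic only:

freq_counter = [0, 0, 10, 0]  # state right after reset(): the built-in prior
freq_counter[3] += 10         # ten genuinely likely transitions arrive
total = sum(freq_counter)     # 20
print((freq_counter[3] - freq_counter[1] * 20.0) / total * 0.73)  # approx. 0.37
# without the prior the same evidence would have scored 1.0 * 0.73 = 0.73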

View file

@@ -27,8 +27,12 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

+from typing import Optional, Union
+
+from .chardistribution import CharDistributionAnalysis
 from .charsetprober import CharSetProber
-from .enums import ProbingState, MachineState
+from .codingstatemachine import CodingStateMachine
+from .enums import LanguageFilter, MachineState, ProbingState


 class MultiByteCharSetProber(CharSetProber):
@@ -36,56 +40,56 @@ class MultiByteCharSetProber(CharSetProber):
     MultiByteCharSetProber
     """

-    def __init__(self, lang_filter=None):
-        super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
-        self.distribution_analyzer = None
-        self.coding_sm = None
-        self._last_char = [0, 0]
+    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+        super().__init__(lang_filter=lang_filter)
+        self.distribution_analyzer: Optional[CharDistributionAnalysis] = None
+        self.coding_sm: Optional[CodingStateMachine] = None
+        self._last_char = bytearray(b"\0\0")

-    def reset(self):
-        super(MultiByteCharSetProber, self).reset()
+    def reset(self) -> None:
+        super().reset()
         if self.coding_sm:
             self.coding_sm.reset()
         if self.distribution_analyzer:
             self.distribution_analyzer.reset()
-        self._last_char = [0, 0]
+        self._last_char = bytearray(b"\0\0")

-    @property
-    def charset_name(self):
-        raise NotImplementedError
-
-    @property
-    def language(self):
-        raise NotImplementedError
-
-    def feed(self, byte_str):
-        for i in range(len(byte_str)):
-            coding_state = self.coding_sm.next_state(byte_str[i])
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+        assert self.coding_sm is not None
+        assert self.distribution_analyzer is not None
+
+        for i, byte in enumerate(byte_str):
+            coding_state = self.coding_sm.next_state(byte)
             if coding_state == MachineState.ERROR:
-                self.logger.debug('%s %s prober hit error at byte %s',
-                                  self.charset_name, self.language, i)
+                self.logger.debug(
+                    "%s %s prober hit error at byte %s",
+                    self.charset_name,
+                    self.language,
+                    i,
+                )
                 self._state = ProbingState.NOT_ME
                 break
-            elif coding_state == MachineState.ITS_ME:
+            if coding_state == MachineState.ITS_ME:
                 self._state = ProbingState.FOUND_IT
                 break
-            elif coding_state == MachineState.START:
+            if coding_state == MachineState.START:
                 char_len = self.coding_sm.get_current_charlen()
                 if i == 0:
-                    self._last_char[1] = byte_str[0]
+                    self._last_char[1] = byte
                     self.distribution_analyzer.feed(self._last_char, char_len)
                 else:
-                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
-                                                    char_len)
+                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)

         self._last_char[0] = byte_str[-1]

         if self.state == ProbingState.DETECTING:
-            if (self.distribution_analyzer.got_enough_data() and
-                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+            if self.distribution_analyzer.got_enough_data() and (
+                self.get_confidence() > self.SHORTCUT_THRESHOLD
+            ):
                 self._state = ProbingState.FOUND_IT

         return self.state

-    def get_confidence(self):
+    def get_confidence(self) -> float:
+        assert self.distribution_analyzer is not None
+
         return self.distribution_analyzer.get_confidence()
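
The switch of _last_char to a bytearray keeps the cross-chunk behavior intact: the last byte of each feed() call is stashed so a multi-byte character split across chunks still reaches the distribution analyzer as a pair. A hedged sketch showing that chunked input converges to the same verdict:

from chardet.euckrprober import EUCKRProber  # any MultiByteCharSetProber subclass

prober = EUCKRProber()
data = "대한민국 만세".encode("euc_kr")  # sample EUC-KR bytes
for i in range(0, len(data), 3):         # deliberately split mid-character
    prober.feed(data[i : i + 3])
print(prober.charset_name, prober.get_confidence())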

View file

@@ -27,20 +27,22 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

-from .charsetgroupprober import CharSetGroupProber
-from .utf8prober import UTF8Prober
-from .sjisprober import SJISProber
-from .eucjpprober import EUCJPProber
-from .gb2312prober import GB2312Prober
-from .euckrprober import EUCKRProber
-from .cp949prober import CP949Prober
 from .big5prober import Big5Prober
+from .charsetgroupprober import CharSetGroupProber
+from .cp949prober import CP949Prober
+from .enums import LanguageFilter
+from .eucjpprober import EUCJPProber
+from .euckrprober import EUCKRProber
 from .euctwprober import EUCTWProber
+from .gb2312prober import GB2312Prober
+from .johabprober import JOHABProber
+from .sjisprober import SJISProber
+from .utf8prober import UTF8Prober


 class MBCSGroupProber(CharSetGroupProber):
-    def __init__(self, lang_filter=None):
-        super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
+    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+        super().__init__(lang_filter=lang_filter)
         self.probers = [
             UTF8Prober(),
             SJISProber(),
@@ -49,6 +51,7 @@ class MBCSGroupProber(CharSetGroupProber):
             EUCKRProber(),
             CP949Prober(),
             Big5Prober(),
-            EUCTWProber()
+            EUCTWProber(),
+            JOHABProber(),
         ]
         self.reset()
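
Registering JOHABProber here is all it takes, because CharSetGroupProber fans every feed() out to its children and reports the best scorer. A hedged sketch of the selection idea (simplified; the real group prober also deactivates children that answer NOT_ME mid-stream):

def best_guess(probers, data: bytes):
    for prober in probers:
        prober.feed(data)
    best = max(probers, key=lambda p: p.get_confidence())
    return best.charset_name, best.get_confidence()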

View file

@@ -25,10 +25,12 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

+from .codingstatemachinedict import CodingStateMachineDict
 from .enums import MachineState

 # BIG5
+# fmt: off
 BIG5_CLS = (
     1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07    #allow 0x00 as legal value
     1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f
@@ -69,17 +71,20 @@ BIG5_ST = (
     MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
     MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
 )
+# fmt: on

 BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)

-BIG5_SM_MODEL = {'class_table': BIG5_CLS,
-                 'class_factor': 5,
-                 'state_table': BIG5_ST,
-                 'char_len_table': BIG5_CHAR_LEN_TABLE,
-                 'name': 'Big5'}
+BIG5_SM_MODEL: CodingStateMachineDict = {
+    "class_table": BIG5_CLS,
+    "class_factor": 5,
+    "state_table": BIG5_ST,
+    "char_len_table": BIG5_CHAR_LEN_TABLE,
+    "name": "Big5",
+}

 # CP949
+# fmt: off
 CP949_CLS = (
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,  # 00 - 0f
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,  # 10 - 1f
@@ -109,17 +114,20 @@ CP949_ST = (
     MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
     MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
 )
+# fmt: on

 CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)

-CP949_SM_MODEL = {'class_table': CP949_CLS,
-                  'class_factor': 10,
-                  'state_table': CP949_ST,
-                  'char_len_table': CP949_CHAR_LEN_TABLE,
-                  'name': 'CP949'}
+CP949_SM_MODEL: CodingStateMachineDict = {
+    "class_table": CP949_CLS,
+    "class_factor": 10,
+    "state_table": CP949_ST,
+    "char_len_table": CP949_CHAR_LEN_TABLE,
+    "name": "CP949",
+}

 # EUC-JP
+# fmt: off
 EUCJP_CLS = (
     4, 4, 4, 4, 4, 4, 4, 4,  # 00 - 07
     4, 4, 4, 4, 4, 4, 5, 5,  # 08 - 0f
@@ -162,17 +170,20 @@ EUCJP_ST = (
     MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
     3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
 )
+# fmt: on

 EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)

-EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
-                  'class_factor': 6,
-                  'state_table': EUCJP_ST,
-                  'char_len_table': EUCJP_CHAR_LEN_TABLE,
-                  'name': 'EUC-JP'}
+EUCJP_SM_MODEL: CodingStateMachineDict = {
+    "class_table": EUCJP_CLS,
+    "class_factor": 6,
+    "state_table": EUCJP_ST,
+    "char_len_table": EUCJP_CHAR_LEN_TABLE,
+    "name": "EUC-JP",
+}

 # EUC-KR
+# fmt: off
 EUCKR_CLS = (
     1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07
     1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f
@@ -212,17 +223,77 @@ EUCKR_ST = (
     MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
     MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
 )
+# fmt: on

 EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)

-EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
-                  'class_factor': 4,
-                  'state_table': EUCKR_ST,
-                  'char_len_table': EUCKR_CHAR_LEN_TABLE,
-                  'name': 'EUC-KR'}
+EUCKR_SM_MODEL: CodingStateMachineDict = {
+    "class_table": EUCKR_CLS,
+    "class_factor": 4,
+    "state_table": EUCKR_ST,
+    "char_len_table": EUCKR_CHAR_LEN_TABLE,
+    "name": "EUC-KR",
+}
# JOHAB
# fmt: off
JOHAB_CLS = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,0,0, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,0,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,3,3,3,3,3,3,3, # 30 - 37
3,3,3,3,3,3,3,3, # 38 - 3f
3,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,2, # 78 - 7f
6,6,6,6,8,8,8,8, # 80 - 87
8,8,8,8,8,8,8,8, # 88 - 8f
8,7,7,7,7,7,7,7, # 90 - 97
7,7,7,7,7,7,7,7, # 98 - 9f
7,7,7,7,7,7,7,7, # a0 - a7
7,7,7,7,7,7,7,7, # a8 - af
7,7,7,7,7,7,7,7, # b0 - b7
7,7,7,7,7,7,7,7, # b8 - bf
7,7,7,7,7,7,7,7, # c0 - c7
7,7,7,7,7,7,7,7, # c8 - cf
7,7,7,7,5,5,5,5, # d0 - d7
5,9,9,9,9,9,9,5, # d8 - df
9,9,9,9,9,9,9,9, # e0 - e7
9,9,9,9,9,9,9,9, # e8 - ef
9,9,9,9,9,9,9,9, # f0 - f7
9,9,5,5,5,5,5,0 # f8 - ff
)
JOHAB_ST = (
# cls = 0 1 2 3 4 5 6 7 8 9
MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,3 ,3 ,4 , # MachineState.START
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR , # MachineState.ERROR
MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START , # 3
MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START , # 4
)
# fmt: on
JOHAB_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 0, 0, 2, 2, 2)
JOHAB_SM_MODEL: CodingStateMachineDict = {
"class_table": JOHAB_CLS,
"class_factor": 10,
"state_table": JOHAB_ST,
"char_len_table": JOHAB_CHAR_LEN_TABLE,
"name": "Johab",
}
# EUC-TW # EUC-TW
# fmt: off
EUCTW_CLS = ( EUCTW_CLS = (
2, 2, 2, 2, 2, 2, 2, 2, # 00 - 07 2, 2, 2, 2, 2, 2, 2, 2, # 00 - 07
2, 2, 2, 2, 2, 2, 0, 0, # 08 - 0f 2, 2, 2, 2, 2, 2, 0, 0, # 08 - 0f
@ -266,17 +337,20 @@ EUCTW_ST = (
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
) )
# fmt: on
EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3) EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
EUCTW_SM_MODEL = {'class_table': EUCTW_CLS, EUCTW_SM_MODEL: CodingStateMachineDict = {
'class_factor': 7, "class_table": EUCTW_CLS,
'state_table': EUCTW_ST, "class_factor": 7,
'char_len_table': EUCTW_CHAR_LEN_TABLE, "state_table": EUCTW_ST,
'name': 'x-euc-tw'} "char_len_table": EUCTW_CHAR_LEN_TABLE,
"name": "x-euc-tw",
}
# GB2312 # GB2312
# fmt: off
GB2312_CLS = ( GB2312_CLS = (
1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07
1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
@ -320,6 +394,7 @@ GB2312_ST = (
MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27 MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
) )
# fmt: on
# To be accurate, the length of class 6 can be either 2 or 4. # To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since # But it is not necessary to discriminate between the two since
@ -328,14 +403,16 @@ GB2312_ST = (
# 2 here. # 2 here.
GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2) GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
GB2312_SM_MODEL = {'class_table': GB2312_CLS, GB2312_SM_MODEL: CodingStateMachineDict = {
'class_factor': 7, "class_table": GB2312_CLS,
'state_table': GB2312_ST, "class_factor": 7,
'char_len_table': GB2312_CHAR_LEN_TABLE, "state_table": GB2312_ST,
'name': 'GB2312'} "char_len_table": GB2312_CHAR_LEN_TABLE,
"name": "GB2312",
}
# Shift_JIS # Shift_JIS
# fmt: off
SJIS_CLS = ( SJIS_CLS = (
1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07
1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
@ -370,25 +447,28 @@ SJIS_CLS = (
3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7
3, 3, 3, 3, 3, 4, 4, 4, # e8 - ef 3, 3, 3, 3, 3, 4, 4, 4, # e8 - ef
3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff 3, 3, 3, 3, 3, 0, 0, 0, # f8 - ff
)
SJIS_ST = ( SJIS_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17 MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
) )
# fmt: on
SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0) SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
SJIS_SM_MODEL = {'class_table': SJIS_CLS, SJIS_SM_MODEL: CodingStateMachineDict = {
'class_factor': 6, "class_table": SJIS_CLS,
'state_table': SJIS_ST, "class_factor": 6,
'char_len_table': SJIS_CHAR_LEN_TABLE, "state_table": SJIS_ST,
'name': 'Shift_JIS'} "char_len_table": SJIS_CHAR_LEN_TABLE,
"name": "Shift_JIS",
}
# UCS2-BE # UCS2-BE
# fmt: off
UCS2BE_CLS = ( UCS2BE_CLS = (
0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f
@ -433,17 +513,20 @@ UCS2BE_ST = (
5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
) )
# fmt: on
UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2) UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS, UCS2BE_SM_MODEL: CodingStateMachineDict = {
'class_factor': 6, "class_table": UCS2BE_CLS,
'state_table': UCS2BE_ST, "class_factor": 6,
'char_len_table': UCS2BE_CHAR_LEN_TABLE, "state_table": UCS2BE_ST,
'name': 'UTF-16BE'} "char_len_table": UCS2BE_CHAR_LEN_TABLE,
"name": "UTF-16BE",
}
# UCS2-LE # UCS2-LE
# fmt: off
UCS2LE_CLS = ( UCS2LE_CLS = (
0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f
@ -488,17 +571,20 @@ UCS2LE_ST = (
5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
) )
# fmt: on
UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2) UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS, UCS2LE_SM_MODEL: CodingStateMachineDict = {
'class_factor': 6, "class_table": UCS2LE_CLS,
'state_table': UCS2LE_ST, "class_factor": 6,
'char_len_table': UCS2LE_CHAR_LEN_TABLE, "state_table": UCS2LE_ST,
'name': 'UTF-16LE'} "char_len_table": UCS2LE_CHAR_LEN_TABLE,
"name": "UTF-16LE",
}
# UTF-8 # UTF-8
# fmt: off
UTF8_CLS = ( UTF8_CLS = (
1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as a legal value 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as a legal value
1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
@ -562,11 +648,14 @@ UTF8_ST = (
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7 MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
) )
# fmt: on
UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8_SM_MODEL = {'class_table': UTF8_CLS, UTF8_SM_MODEL: CodingStateMachineDict = {
'class_factor': 16, "class_table": UTF8_CLS,
'state_table': UTF8_ST, "class_factor": 16,
'char_len_table': UTF8_CHAR_LEN_TABLE, "state_table": UTF8_ST,
'name': 'UTF-8'} "char_len_table": UTF8_CHAR_LEN_TABLE,
"name": "UTF-8",
}
View file
@ -1,19 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" """
Metadata about languages used by our model training code for our Metadata about languages used by our model training code for our
SingleByteCharSetProbers. Could be used for other things in the future. SingleByteCharSetProbers. Could be used for other things in the future.
This code is based on the language metadata from the uchardet project. This code is based on the language metadata from the uchardet project.
""" """
from __future__ import absolute_import, print_function
from string import ascii_letters from string import ascii_letters
from typing import List, Optional
# TODO: Add Ukrainian (KOI8-U)
# TODO: Add Ukranian (KOI8-U) class Language:
class Language(object):
"""Metadata about a language useful for training models """Metadata about a language useful for training models
:ivar name: The human name for the language, in English. :ivar name: The human name for the language, in English.
@ -33,9 +31,17 @@ class Language(object):
Wikipedia for training data. Wikipedia for training data.
:type wiki_start_pages: list of str :type wiki_start_pages: list of str
""" """
def __init__(self, name=None, iso_code=None, use_ascii=True, charsets=None,
alphabet=None, wiki_start_pages=None): def __init__(
super(Language, self).__init__() self,
name: Optional[str] = None,
iso_code: Optional[str] = None,
use_ascii: bool = True,
charsets: Optional[List[str]] = None,
alphabet: Optional[str] = None,
wiki_start_pages: Optional[List[str]] = None,
) -> None:
super().__init__()
self.name = name self.name = name
self.iso_code = iso_code self.iso_code = iso_code
self.use_ascii = use_ascii self.use_ascii = use_ascii
@ -46,246 +52,282 @@ class Language(object):
else: else:
alphabet = ascii_letters alphabet = ascii_letters
elif not alphabet: elif not alphabet:
raise ValueError('Must supply alphabet if use_ascii is False') raise ValueError("Must supply alphabet if use_ascii is False")
self.alphabet = ''.join(sorted(set(alphabet))) if alphabet else None self.alphabet = "".join(sorted(set(alphabet))) if alphabet else None
self.wiki_start_pages = wiki_start_pages self.wiki_start_pages = wiki_start_pages
def __repr__(self): def __repr__(self) -> str:
return '{}({})'.format(self.__class__.__name__, param_str = ", ".join(
', '.join('{}={!r}'.format(k, v) f"{k}={v!r}" for k, v in self.__dict__.items() if not k.startswith("_")
for k, v in self.__dict__.items() )
if not k.startswith('_'))) return f"{self.__class__.__name__}({param_str})"
LANGUAGES = {'Arabic': Language(name='Arabic', LANGUAGES = {
iso_code='ar', "Arabic": Language(
name="Arabic",
iso_code="ar",
use_ascii=False, use_ascii=False,
# We only support encodings that use isolated # We only support encodings that use isolated
# forms, because the current recommendation is # forms, because the current recommendation is
# that the rendering system handles presentation # that the rendering system handles presentation
# forms. This means we purposefully skip IBM864. # forms. This means we purposefully skip IBM864.
charsets=['ISO-8859-6', 'WINDOWS-1256', charsets=["ISO-8859-6", "WINDOWS-1256", "CP720", "CP864"],
'CP720', 'CP864'], alphabet="ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ",
alphabet=u'ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ', wiki_start_pages=["الصفحة_الرئيسية"],
wiki_start_pages=[u'الصفحة_الرئيسية']), ),
'Belarusian': Language(name='Belarusian', "Belarusian": Language(
iso_code='be', name="Belarusian",
iso_code="be",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-5', 'WINDOWS-1251', charsets=["ISO-8859-5", "WINDOWS-1251", "IBM866", "MacCyrillic"],
'IBM866', 'MacCyrillic'], alphabet="АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯабвгдеёжзійклмнопрстуўфхцчшыьэюяʼ",
alphabet=(u'АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯ' wiki_start_pages=["Галоўная_старонка"],
u'абвгдеёжзійклмнопрстуўфхцчшыьэюяʼ'), ),
wiki_start_pages=[u'Галоўная_старонка']), "Bulgarian": Language(
'Bulgarian': Language(name='Bulgarian', name="Bulgarian",
iso_code='bg', iso_code="bg",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-5', 'WINDOWS-1251', charsets=["ISO-8859-5", "WINDOWS-1251", "IBM855"],
'IBM855'], alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
alphabet=(u'АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯ' wiki_start_pages=["Начална_страница"],
u'абвгдежзийклмнопрстуфхцчшщъьюя'), ),
wiki_start_pages=[u'Начална_страница']), "Czech": Language(
'Czech': Language(name='Czech', name="Czech",
iso_code='cz', iso_code="cz",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-2', 'WINDOWS-1250'], charsets=["ISO-8859-2", "WINDOWS-1250"],
alphabet=u'áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ', alphabet="áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ",
wiki_start_pages=[u'Hlavní_strana']), wiki_start_pages=["Hlavní_strana"],
'Danish': Language(name='Danish', ),
iso_code='da', "Danish": Language(
name="Danish",
iso_code="da",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'ISO-8859-15', charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
'WINDOWS-1252'], alphabet="æøåÆØÅ",
alphabet=u'æøåÆØÅ', wiki_start_pages=["Forside"],
wiki_start_pages=[u'Forside']), ),
'German': Language(name='German', "German": Language(
iso_code='de', name="German",
iso_code="de",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'WINDOWS-1252'], charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
alphabet=u'äöüßÄÖÜ', alphabet="äöüßẞÄÖÜ",
wiki_start_pages=[u'Wikipedia:Hauptseite']), wiki_start_pages=["Wikipedia:Hauptseite"],
'Greek': Language(name='Greek', ),
iso_code='el', "Greek": Language(
name="Greek",
iso_code="el",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-7', 'WINDOWS-1253'], charsets=["ISO-8859-7", "WINDOWS-1253"],
alphabet=(u'αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώ' alphabet="αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ",
u'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ'), wiki_start_pages=["Πύλη:Κύρια"],
wiki_start_pages=[u'Πύλη:Κύρια']), ),
'English': Language(name='English', "English": Language(
iso_code='en', name="English",
iso_code="en",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'WINDOWS-1252'], charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"],
wiki_start_pages=[u'Main_Page']), wiki_start_pages=["Main_Page"],
'Esperanto': Language(name='Esperanto', ),
iso_code='eo', "Esperanto": Language(
name="Esperanto",
iso_code="eo",
# Q, W, X, and Y not used at all # Q, W, X, and Y not used at all
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-3'], charsets=["ISO-8859-3"],
alphabet=(u'abcĉdefgĝhĥijĵklmnoprsŝtuŭvz' alphabet="abcĉdefgĝhĥijĵklmnoprsŝtuŭvzABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ",
u'ABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ'), wiki_start_pages=["Vikipedio:Ĉefpaĝo"],
wiki_start_pages=[u'Vikipedio:Ĉefpaĝo']), ),
'Spanish': Language(name='Spanish', "Spanish": Language(
iso_code='es', name="Spanish",
iso_code="es",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'ISO-8859-15', charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
'WINDOWS-1252'], alphabet="ñáéíóúüÑÁÉÍÓÚÜ",
alphabet=u'ñáéíóúüÑÁÉÍÓÚÜ', wiki_start_pages=["Wikipedia:Portada"],
wiki_start_pages=[u'Wikipedia:Portada']), ),
'Estonian': Language(name='Estonian', "Estonian": Language(
iso_code='et', name="Estonian",
iso_code="et",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-4', 'ISO-8859-13', charsets=["ISO-8859-4", "ISO-8859-13", "WINDOWS-1257"],
'WINDOWS-1257'],
# C, F, Š, Q, W, X, Y, Z, Ž are only for # C, F, Š, Q, W, X, Y, Z, Ž are only for
# loanwords # loanwords
alphabet=(u'ABDEGHIJKLMNOPRSTUVÕÄÖÜ' alphabet="ABDEGHIJKLMNOPRSTUVÕÄÖÜabdeghijklmnoprstuvõäöü",
u'abdeghijklmnoprstuvõäöü'), wiki_start_pages=["Esileht"],
wiki_start_pages=[u'Esileht']), ),
'Finnish': Language(name='Finnish', "Finnish": Language(
iso_code='fi', name="Finnish",
iso_code="fi",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'ISO-8859-15', charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
'WINDOWS-1252'], alphabet="ÅÄÖŠŽåäöšž",
alphabet=u'ÅÄÖŠŽåäöšž', wiki_start_pages=["Wikipedia:Etusivu"],
wiki_start_pages=[u'Wikipedia:Etusivu']), ),
'French': Language(name='French', "French": Language(
iso_code='fr', name="French",
iso_code="fr",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'ISO-8859-15', charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
'WINDOWS-1252'], alphabet="œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ",
alphabet=u'œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ', wiki_start_pages=["Wikipédia:Accueil_principal", "Bœuf (animal)"],
wiki_start_pages=[u'Wikipédia:Accueil_principal', ),
u'Bœuf (animal)']), "Hebrew": Language(
'Hebrew': Language(name='Hebrew', name="Hebrew",
iso_code='he', iso_code="he",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-8', 'WINDOWS-1255'], charsets=["ISO-8859-8", "WINDOWS-1255"],
alphabet=u'אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ', alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
wiki_start_pages=[u'עמוד_ראשי']), wiki_start_pages=["עמוד_ראשי"],
'Croatian': Language(name='Croatian', ),
iso_code='hr', "Croatian": Language(
name="Croatian",
iso_code="hr",
# Q, W, X, Y are only used for foreign words. # Q, W, X, Y are only used for foreign words.
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-2', 'WINDOWS-1250'], charsets=["ISO-8859-2", "WINDOWS-1250"],
alphabet=(u'abcčćdđefghijklmnoprsštuvzž' alphabet="abcčćdđefghijklmnoprsštuvzžABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ",
u'ABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ'), wiki_start_pages=["Glavna_stranica"],
wiki_start_pages=[u'Glavna_stranica']), ),
'Hungarian': Language(name='Hungarian', "Hungarian": Language(
iso_code='hu', name="Hungarian",
iso_code="hu",
# Q, W, X, Y are only used for foreign words. # Q, W, X, Y are only used for foreign words.
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-2', 'WINDOWS-1250'], charsets=["ISO-8859-2", "WINDOWS-1250"],
alphabet=(u'abcdefghijklmnoprstuvzáéíóöőúüű' alphabet="abcdefghijklmnoprstuvzáéíóöőúüűABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ",
u'ABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ'), wiki_start_pages=["Kezdőlap"],
wiki_start_pages=[u'Kezdőlap']), ),
'Italian': Language(name='Italian', "Italian": Language(
iso_code='it', name="Italian",
iso_code="it",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'ISO-8859-15', charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
'WINDOWS-1252'], alphabet="ÀÈÉÌÒÓÙàèéìòóù",
alphabet=u'ÀÈÉÌÒÓÙàèéìòóù', wiki_start_pages=["Pagina_principale"],
wiki_start_pages=[u'Pagina_principale']), ),
'Lithuanian': Language(name='Lithuanian', "Lithuanian": Language(
iso_code='lt', name="Lithuanian",
iso_code="lt",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-13', 'WINDOWS-1257', charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
'ISO-8859-4'],
# Q, W, and X not used at all # Q, W, and X not used at all
alphabet=(u'AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽ' alphabet="AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽaąbcčdeęėfghiįyjklmnoprsštuųūvzž",
u'aąbcčdeęėfghiįyjklmnoprsštuųūvzž'), wiki_start_pages=["Pagrindinis_puslapis"],
wiki_start_pages=[u'Pagrindinis_puslapis']), ),
'Latvian': Language(name='Latvian', "Latvian": Language(
iso_code='lv', name="Latvian",
iso_code="lv",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-13', 'WINDOWS-1257', charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
'ISO-8859-4'],
# Q, W, X, Y are only for loanwords # Q, W, X, Y are only for loanwords
alphabet=(u'AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽ' alphabet="AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽaābcčdeēfgģhiījkķlļmnņoprsštuūvzž",
u'aābcčdeēfgģhiījkķlļmnņoprsštuūvzž'), wiki_start_pages=["Sākumlapa"],
wiki_start_pages=[u'Sākumlapa']), ),
'Macedonian': Language(name='Macedonian', "Macedonian": Language(
iso_code='mk', name="Macedonian",
iso_code="mk",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-5', 'WINDOWS-1251', charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
'MacCyrillic', 'IBM855'], alphabet="АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШабвгдѓежзѕијклљмнњопрстќуфхцчџш",
alphabet=(u'АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШ' wiki_start_pages=["Главна_страница"],
u'абвгдѓежзѕијклљмнњопрстќуфхцчџш'), ),
wiki_start_pages=[u'Главна_страница']), "Dutch": Language(
'Dutch': Language(name='Dutch', name="Dutch",
iso_code='nl', iso_code="nl",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'WINDOWS-1252'], charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"],
wiki_start_pages=[u'Hoofdpagina']), wiki_start_pages=["Hoofdpagina"],
'Polish': Language(name='Polish', ),
iso_code='pl', "Polish": Language(
name="Polish",
iso_code="pl",
# Q and X are only used for foreign words. # Q and X are only used for foreign words.
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-2', 'WINDOWS-1250'], charsets=["ISO-8859-2", "WINDOWS-1250"],
alphabet=(u'AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ' alphabet="AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻaąbcćdeęfghijklłmnńoóprsśtuwyzźż",
u'aąbcćdeęfghijklłmnńoóprsśtuwyzźż'), wiki_start_pages=["Wikipedia:Strona_główna"],
wiki_start_pages=[u'Wikipedia:Strona_główna']), ),
'Portuguese': Language(name='Portuguese', "Portuguese": Language(
iso_code='pt', name="Portuguese",
iso_code="pt",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-1', 'ISO-8859-15', charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
'WINDOWS-1252'], alphabet="ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú",
alphabet=u'ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú', wiki_start_pages=["Wikipédia:Página_principal"],
wiki_start_pages=[u'Wikipédia:Página_principal']), ),
'Romanian': Language(name='Romanian', "Romanian": Language(
iso_code='ro', name="Romanian",
iso_code="ro",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-2', 'WINDOWS-1250'], charsets=["ISO-8859-2", "WINDOWS-1250"],
alphabet=u'ăâîșțĂÂÎȘȚ', alphabet="ăâîșțĂÂÎȘȚ",
wiki_start_pages=[u'Pagina_principală']), wiki_start_pages=["Pagina_principală"],
'Russian': Language(name='Russian', ),
iso_code='ru', "Russian": Language(
name="Russian",
iso_code="ru",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-5', 'WINDOWS-1251', charsets=[
'KOI8-R', 'MacCyrillic', 'IBM866', "ISO-8859-5",
'IBM855'], "WINDOWS-1251",
alphabet=(u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя' "KOI8-R",
u'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'), "MacCyrillic",
wiki_start_pages=[u'Заглавная_страница']), "IBM866",
'Slovak': Language(name='Slovak', "IBM855",
iso_code='sk', ],
alphabet="абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
wiki_start_pages=["Заглавная_страница"],
),
"Slovak": Language(
name="Slovak",
iso_code="sk",
use_ascii=True, use_ascii=True,
charsets=['ISO-8859-2', 'WINDOWS-1250'], charsets=["ISO-8859-2", "WINDOWS-1250"],
alphabet=u'áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ', alphabet="áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ",
wiki_start_pages=[u'Hlavná_stránka']), wiki_start_pages=["Hlavná_stránka"],
'Slovene': Language(name='Slovene', ),
iso_code='sl', "Slovene": Language(
name="Slovene",
iso_code="sl",
# Q, W, X, Y are only used for foreign words. # Q, W, X, Y are only used for foreign words.
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-2', 'WINDOWS-1250'], charsets=["ISO-8859-2", "WINDOWS-1250"],
alphabet=(u'abcčdefghijklmnoprsštuvzž' alphabet="abcčdefghijklmnoprsštuvzžABCČDEFGHIJKLMNOPRSŠTUVZŽ",
u'ABCČDEFGHIJKLMNOPRSŠTUVZŽ'), wiki_start_pages=["Glavna_stran"],
wiki_start_pages=[u'Glavna_stran']), ),
# Serbian can be written in both Latin and Cyrillic, but there's no # Serbian can be written in both Latin and Cyrillic, but there's no
# simple way to get the Latin alphabet pages from Wikipedia through # simple way to get the Latin alphabet pages from Wikipedia through
# the API, so for now we just support Cyrillic. # the API, so for now we just support Cyrillic.
'Serbian': Language(name='Serbian', "Serbian": Language(
iso_code='sr', name="Serbian",
alphabet=(u'АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШ' iso_code="sr",
u'абвгдђежзијклљмнњопрстћуфхцчџш'), alphabet="АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШабвгдђежзијклљмнњопрстћуфхцчџш",
charsets=['ISO-8859-5', 'WINDOWS-1251', charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
'MacCyrillic', 'IBM855'], wiki_start_pages=["Главна_страна"],
wiki_start_pages=[u'Главна_страна']), ),
'Thai': Language(name='Thai', "Thai": Language(
iso_code='th', name="Thai",
iso_code="th",
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-11', 'TIS-620', 'CP874'], charsets=["ISO-8859-11", "TIS-620", "CP874"],
alphabet=u'กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛', alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
wiki_start_pages=[u'หน้าหลัก']), wiki_start_pages=["หน้าหลัก"],
'Turkish': Language(name='Turkish', ),
iso_code='tr', "Turkish": Language(
name="Turkish",
iso_code="tr",
# Q, W, and X are not used by Turkish # Q, W, and X are not used by Turkish
use_ascii=False, use_ascii=False,
charsets=['ISO-8859-3', 'ISO-8859-9', charsets=["ISO-8859-3", "ISO-8859-9", "WINDOWS-1254"],
'WINDOWS-1254'], alphabet="abcçdefgğhıijklmnoöprsştuüvyzâîûABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ",
alphabet=(u'abcçdefgğhıijklmnoöprsştuüvyzâîû' wiki_start_pages=["Ana_Sayfa"],
u'ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ'), ),
wiki_start_pages=[u'Ana_Sayfa']), "Vietnamese": Language(
'Vietnamese': Language(name='Vietnamese', name="Vietnamese",
iso_code='vi', iso_code="vi",
use_ascii=False, use_ascii=False,
# Windows-1258 is the only common 8-bit # Windows-1258 is the only common 8-bit
# Vietnamese encoding supported by Python. # Vietnamese encoding supported by Python.
@ -303,8 +345,8 @@ LANGUAGES = {'Arabic': Language(name='Arabic',
# scheme has declined dramatically following # scheme has declined dramatically following
# the adoption of Unicode on the World Wide # the adoption of Unicode on the World Wide
# Web. # Web.
charsets=['WINDOWS-1258'], charsets=["WINDOWS-1258"],
alphabet=(u'aăâbcdđeêghiklmnoôơpqrstuưvxy' alphabet="aăâbcdđeêghiklmnoôơpqrstuưvxyAĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY",
u'AĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY'), wiki_start_pages=["Chữ_Quốc_ngữ"],
wiki_start_pages=[u'Chữ_Quốc_ngữ']), ),
} }
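The metadata above drives the model-training scripts rather than detection
itself, but it can be queried directly. An illustrative sketch, assuming the
vendored package is importable as chardet:

from chardet.metadata.languages import LANGUAGES

# List the single-byte charsets the training metadata ties to each language.
for name, lang in sorted(LANGUAGES.items()):
    print(f"{name} ({lang.iso_code}): {', '.join(lang.charsets)}")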
16
lib/chardet/resultdict.py Normal file
View file
@ -0,0 +1,16 @@
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
# TypedDict was introduced in Python 3.8.
#
# TODO: Remove the else block and TYPE_CHECKING check when dropping support
# for Python 3.7.
from typing import TypedDict
class ResultDict(TypedDict):
encoding: Optional[str]
confidence: float
language: Optional[str]
else:
ResultDict = dict
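ResultDict exists purely for type checkers; at runtime it is a plain dict, so
calling code is unchanged. A short sketch of the shape it describes, via the
public detect() API (the sample is UTF-8; the exact confidence will vary):

from chardet import detect

result = detect("こんにちは".encode("utf-8"))
# ResultDict at type-check time, a plain dict at runtime:
print(result["encoding"], result["confidence"], result["language"])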
View file
@ -26,20 +26,20 @@
# 02110-1301 USA # 02110-1301 USA
######################### END LICENSE BLOCK ######################### ######################### END LICENSE BLOCK #########################
from collections import namedtuple from typing import Dict, List, NamedTuple, Optional, Union
from .charsetprober import CharSetProber from .charsetprober import CharSetProber
from .enums import CharacterCategory, ProbingState, SequenceLikelihood from .enums import CharacterCategory, ProbingState, SequenceLikelihood
SingleByteCharSetModel = namedtuple('SingleByteCharSetModel', class SingleByteCharSetModel(NamedTuple):
['charset_name', charset_name: str
'language', language: str
'char_to_order_map', char_to_order_map: Dict[int, int]
'language_model', language_model: Dict[int, Dict[int, int]]
'typical_positive_ratio', typical_positive_ratio: float
'keep_ascii_letters', keep_ascii_letters: bool
'alphabet']) alphabet: str
class SingleByteCharSetProber(CharSetProber): class SingleByteCharSetProber(CharSetProber):
@ -48,48 +48,55 @@ class SingleByteCharSetProber(CharSetProber):
POSITIVE_SHORTCUT_THRESHOLD = 0.95 POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05 NEGATIVE_SHORTCUT_THRESHOLD = 0.05
def __init__(self, model, reversed=False, name_prober=None): def __init__(
super(SingleByteCharSetProber, self).__init__() self,
model: SingleByteCharSetModel,
is_reversed: bool = False,
name_prober: Optional[CharSetProber] = None,
) -> None:
super().__init__()
self._model = model self._model = model
# TRUE if we need to reverse every pair in the model lookup # TRUE if we need to reverse every pair in the model lookup
self._reversed = reversed self._reversed = is_reversed
# Optional auxiliary prober for name decision # Optional auxiliary prober for name decision
self._name_prober = name_prober self._name_prober = name_prober
self._last_order = None self._last_order = 255
self._seq_counters = None self._seq_counters: List[int] = []
self._total_seqs = None self._total_seqs = 0
self._total_char = None self._total_char = 0
self._freq_char = None self._control_char = 0
self._freq_char = 0
self.reset() self.reset()
def reset(self): def reset(self) -> None:
super(SingleByteCharSetProber, self).reset() super().reset()
# char order of last character # char order of last character
self._last_order = 255 self._last_order = 255
self._seq_counters = [0] * SequenceLikelihood.get_num_categories() self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
self._total_seqs = 0 self._total_seqs = 0
self._total_char = 0 self._total_char = 0
self._control_char = 0
# characters that fall in our sampling range # characters that fall in our sampling range
self._freq_char = 0 self._freq_char = 0
@property @property
def charset_name(self): def charset_name(self) -> Optional[str]:
if self._name_prober: if self._name_prober:
return self._name_prober.charset_name return self._name_prober.charset_name
else:
return self._model.charset_name return self._model.charset_name
@property @property
def language(self): def language(self) -> Optional[str]:
if self._name_prober: if self._name_prober:
return self._name_prober.language return self._name_prober.language
else:
return self._model.language return self._model.language
def feed(self, byte_str): def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
# TODO: Make filter_international_words keep things in self.alphabet # TODO: Make filter_international_words keep things in self.alphabet
if not self._model.keep_ascii_letters: if not self._model.keep_ascii_letters:
byte_str = self.filter_international_words(byte_str) byte_str = self.filter_international_words(byte_str)
else:
byte_str = self.remove_xml_tags(byte_str)
if not byte_str: if not byte_str:
return self.state return self.state
char_to_order_map = self._model.char_to_order_map char_to_order_map = self._model.char_to_order_map
@ -103,9 +110,6 @@ class SingleByteCharSetProber(CharSetProber):
# _total_char purposes. # _total_char purposes.
if order < CharacterCategory.CONTROL: if order < CharacterCategory.CONTROL:
self._total_char += 1 self._total_char += 1
# TODO: Follow uchardet's lead and discount confidence for frequent
# control characters.
# See https://github.com/BYVoid/uchardet/commit/55b4f23971db61
if order < self.SAMPLE_SIZE: if order < self.SAMPLE_SIZE:
self._freq_char += 1 self._freq_char += 1
if self._last_order < self.SAMPLE_SIZE: if self._last_order < self.SAMPLE_SIZE:
@ -122,23 +126,36 @@ class SingleByteCharSetProber(CharSetProber):
if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD: if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
confidence = self.get_confidence() confidence = self.get_confidence()
if confidence > self.POSITIVE_SHORTCUT_THRESHOLD: if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
self.logger.debug('%s confidence = %s, we have a winner', self.logger.debug(
charset_name, confidence) "%s confidence = %s, we have a winner", charset_name, confidence
)
self._state = ProbingState.FOUND_IT self._state = ProbingState.FOUND_IT
elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD: elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
self.logger.debug('%s confidence = %s, below negative ' self.logger.debug(
'shortcut threshhold %s', charset_name, "%s confidence = %s, below negative shortcut threshold %s",
charset_name,
confidence, confidence,
self.NEGATIVE_SHORTCUT_THRESHOLD) self.NEGATIVE_SHORTCUT_THRESHOLD,
)
self._state = ProbingState.NOT_ME self._state = ProbingState.NOT_ME
return self.state return self.state
def get_confidence(self): def get_confidence(self) -> float:
r = 0.01 r = 0.01
if self._total_seqs > 0: if self._total_seqs > 0:
r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) / r = (
self._total_seqs / self._model.typical_positive_ratio) (
self._seq_counters[SequenceLikelihood.POSITIVE]
+ 0.25 * self._seq_counters[SequenceLikelihood.LIKELY]
)
/ self._total_seqs
/ self._model.typical_positive_ratio
)
# The more control characters (in proportion to the size
# of the text), the less confident we become in the current
# charset.
r = r * (self._total_char - self._control_char) / self._total_char
r = r * self._freq_char / self._total_char r = r * self._freq_char / self._total_char
if r >= 1.0: if r >= 1.0:
r = 0.99 r = 0.99
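Two behavioural changes land in get_confidence() here: LIKELY sequences now
count at quarter weight alongside POSITIVE ones, and frequent control
characters discount the score (the uchardet change referenced by the removed
TODO). A worked illustration with hypothetical counters, not taken from any
real model:

positive, likely = 80, 8        # hypothetical sequence-likelihood counters
total_seqs = 100
typical_positive_ratio = 0.95   # would come from the language model
total_char, control_char = 200, 10
freq_char = 150                 # characters inside the sampling range

r = (positive + 0.25 * likely) / total_seqs / typical_positive_ratio
r *= (total_char - control_char) / total_char  # new control-char discount
r *= freq_char / total_char
print(round(min(r, 0.99), 3))  # 0.615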
View file
@ -28,33 +28,38 @@
from .charsetgroupprober import CharSetGroupProber from .charsetgroupprober import CharSetGroupProber
from .hebrewprober import HebrewProber from .hebrewprober import HebrewProber
from .langbulgarianmodel import (ISO_8859_5_BULGARIAN_MODEL, from .langbulgarianmodel import ISO_8859_5_BULGARIAN_MODEL, WINDOWS_1251_BULGARIAN_MODEL
WINDOWS_1251_BULGARIAN_MODEL)
from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL
from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL
# from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL, # from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL,
# WINDOWS_1250_HUNGARIAN_MODEL) # WINDOWS_1250_HUNGARIAN_MODEL)
from .langrussianmodel import (IBM855_RUSSIAN_MODEL, IBM866_RUSSIAN_MODEL, from .langrussianmodel import (
ISO_8859_5_RUSSIAN_MODEL, KOI8_R_RUSSIAN_MODEL, IBM855_RUSSIAN_MODEL,
IBM866_RUSSIAN_MODEL,
ISO_8859_5_RUSSIAN_MODEL,
KOI8_R_RUSSIAN_MODEL,
MACCYRILLIC_RUSSIAN_MODEL, MACCYRILLIC_RUSSIAN_MODEL,
WINDOWS_1251_RUSSIAN_MODEL) WINDOWS_1251_RUSSIAN_MODEL,
)
from .langthaimodel import TIS_620_THAI_MODEL from .langthaimodel import TIS_620_THAI_MODEL
from .langturkishmodel import ISO_8859_9_TURKISH_MODEL from .langturkishmodel import ISO_8859_9_TURKISH_MODEL
from .sbcharsetprober import SingleByteCharSetProber from .sbcharsetprober import SingleByteCharSetProber
class SBCSGroupProber(CharSetGroupProber): class SBCSGroupProber(CharSetGroupProber):
def __init__(self): def __init__(self) -> None:
super(SBCSGroupProber, self).__init__() super().__init__()
hebrew_prober = HebrewProber() hebrew_prober = HebrewProber()
logical_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL, logical_hebrew_prober = SingleByteCharSetProber(
False, hebrew_prober) WINDOWS_1255_HEBREW_MODEL, is_reversed=False, name_prober=hebrew_prober
)
# TODO: See if using ISO-8859-8 Hebrew model works better here, since # TODO: See if using ISO-8859-8 Hebrew model works better here, since
# it's actually the visual one # it's actually the visual one
visual_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL, visual_hebrew_prober = SingleByteCharSetProber(
True, hebrew_prober) WINDOWS_1255_HEBREW_MODEL, is_reversed=True, name_prober=hebrew_prober
hebrew_prober.set_model_probers(logical_hebrew_prober, )
visual_hebrew_prober) hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
# TODO: ORDER MATTERS HERE. I changed the order vs what was in master # TODO: ORDER MATTERS HERE. I changed the order vs what was in master
# and several tests failed that did not before. Some thought # and several tests failed that did not before. Some thought
# should be put into the ordering, and we should consider making # should be put into the ordering, and we should consider making
View file
@ -25,68 +25,81 @@
# 02110-1301 USA # 02110-1301 USA
######################### END LICENSE BLOCK ######################### ######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber from typing import Union
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis from .chardistribution import SJISDistributionAnalysis
from .codingstatemachine import CodingStateMachine
from .enums import MachineState, ProbingState
from .jpcntx import SJISContextAnalysis from .jpcntx import SJISContextAnalysis
from .mbcharsetprober import MultiByteCharSetProber
from .mbcssm import SJIS_SM_MODEL from .mbcssm import SJIS_SM_MODEL
from .enums import ProbingState, MachineState
class SJISProber(MultiByteCharSetProber): class SJISProber(MultiByteCharSetProber):
def __init__(self): def __init__(self) -> None:
super(SJISProber, self).__init__() super().__init__()
self.coding_sm = CodingStateMachine(SJIS_SM_MODEL) self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
self.distribution_analyzer = SJISDistributionAnalysis() self.distribution_analyzer = SJISDistributionAnalysis()
self.context_analyzer = SJISContextAnalysis() self.context_analyzer = SJISContextAnalysis()
self.reset() self.reset()
def reset(self): def reset(self) -> None:
super(SJISProber, self).reset() super().reset()
self.context_analyzer.reset() self.context_analyzer.reset()
@property @property
def charset_name(self): def charset_name(self) -> str:
return self.context_analyzer.charset_name return self.context_analyzer.charset_name
@property @property
def language(self): def language(self) -> str:
return "Japanese" return "Japanese"
def feed(self, byte_str): def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
for i in range(len(byte_str)): assert self.coding_sm is not None
coding_state = self.coding_sm.next_state(byte_str[i]) assert self.distribution_analyzer is not None
for i, byte in enumerate(byte_str):
coding_state = self.coding_sm.next_state(byte)
if coding_state == MachineState.ERROR: if coding_state == MachineState.ERROR:
self.logger.debug('%s %s prober hit error at byte %s', self.logger.debug(
self.charset_name, self.language, i) "%s %s prober hit error at byte %s",
self.charset_name,
self.language,
i,
)
self._state = ProbingState.NOT_ME self._state = ProbingState.NOT_ME
break break
elif coding_state == MachineState.ITS_ME: if coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT self._state = ProbingState.FOUND_IT
break break
elif coding_state == MachineState.START: if coding_state == MachineState.START:
char_len = self.coding_sm.get_current_charlen() char_len = self.coding_sm.get_current_charlen()
if i == 0: if i == 0:
self._last_char[1] = byte_str[0] self._last_char[1] = byte
self.context_analyzer.feed(self._last_char[2 - char_len:], self.context_analyzer.feed(
char_len) self._last_char[2 - char_len :], char_len
)
self.distribution_analyzer.feed(self._last_char, char_len) self.distribution_analyzer.feed(self._last_char, char_len)
else: else:
self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3 self.context_analyzer.feed(
- char_len], char_len) byte_str[i + 1 - char_len : i + 3 - char_len], char_len
self.distribution_analyzer.feed(byte_str[i - 1:i + 1], )
char_len) self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
self._last_char[0] = byte_str[-1] self._last_char[0] = byte_str[-1]
if self.state == ProbingState.DETECTING: if self.state == ProbingState.DETECTING:
if (self.context_analyzer.got_enough_data() and if self.context_analyzer.got_enough_data() and (
(self.get_confidence() > self.SHORTCUT_THRESHOLD)): self.get_confidence() > self.SHORTCUT_THRESHOLD
):
self._state = ProbingState.FOUND_IT self._state = ProbingState.FOUND_IT
return self.state return self.state
def get_confidence(self): def get_confidence(self) -> float:
assert self.distribution_analyzer is not None
context_conf = self.context_analyzer.get_confidence() context_conf = self.context_analyzer.get_confidence()
distrib_conf = self.distribution_analyzer.get_confidence() distrib_conf = self.distribution_analyzer.get_confidence()
return max(context_conf, distrib_conf) return max(context_conf, distrib_conf)
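The prober is normally driven through MBCSGroupProber, but it can also be
exercised standalone; a hedged sketch (confidence on a sample this small is
rough):

from chardet.sjisprober import SJISProber

prober = SJISProber()
prober.feed("こんにちは".encode("shift_jis"))
print(prober.charset_name, prober.language, prober.get_confidence())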
View file
@ -39,16 +39,21 @@ class a user of ``chardet`` should use.
import codecs import codecs
import logging import logging
import re import re
from typing import List, Optional, Union
from .charsetgroupprober import CharSetGroupProber from .charsetgroupprober import CharSetGroupProber
from .charsetprober import CharSetProber
from .enums import InputState, LanguageFilter, ProbingState from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober from .latin1prober import Latin1Prober
from .macromanprober import MacRomanProber
from .mbcsgroupprober import MBCSGroupProber from .mbcsgroupprober import MBCSGroupProber
from .resultdict import ResultDict
from .sbcsgroupprober import SBCSGroupProber from .sbcsgroupprober import SBCSGroupProber
from .utf1632prober import UTF1632Prober
class UniversalDetector(object): class UniversalDetector:
""" """
The ``UniversalDetector`` class underlies the ``chardet.detect`` function The ``UniversalDetector`` class underlies the ``chardet.detect`` function
and coordinates all of the different charset probers. and coordinates all of the different charset probers.
@ -66,49 +71,87 @@ class UniversalDetector(object):
""" """
MINIMUM_THRESHOLD = 0.20 MINIMUM_THRESHOLD = 0.20
HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]') HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]")
ESC_DETECTOR = re.compile(b'(\033|~{)') ESC_DETECTOR = re.compile(b"(\033|~{)")
WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]') WIN_BYTE_DETECTOR = re.compile(b"[\x80-\x9F]")
ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252', ISO_WIN_MAP = {
'iso-8859-2': 'Windows-1250', "iso-8859-1": "Windows-1252",
'iso-8859-5': 'Windows-1251', "iso-8859-2": "Windows-1250",
'iso-8859-6': 'Windows-1256', "iso-8859-5": "Windows-1251",
'iso-8859-7': 'Windows-1253', "iso-8859-6": "Windows-1256",
'iso-8859-8': 'Windows-1255', "iso-8859-7": "Windows-1253",
'iso-8859-9': 'Windows-1254', "iso-8859-8": "Windows-1255",
'iso-8859-13': 'Windows-1257'} "iso-8859-9": "Windows-1254",
"iso-8859-13": "Windows-1257",
}
# Based on https://encoding.spec.whatwg.org/#names-and-labels
# but altered to match Python names for encodings and remove mappings
# that break tests.
LEGACY_MAP = {
"ascii": "Windows-1252",
"iso-8859-1": "Windows-1252",
"tis-620": "ISO-8859-11",
"iso-8859-9": "Windows-1254",
"gb2312": "GB18030",
"euc-kr": "CP949",
"utf-16le": "UTF-16",
}
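The rename is a plain lookup on the lowercased winning name, applied in
close() only when should_rename_legacy is set. A quick sketch of the table
and the flag (the detect() result for such a short Latin-1 sample is
indicative, not guaranteed):

from chardet import detect
from chardet.universaldetector import UniversalDetector

print(UniversalDetector.LEGACY_MAP["ascii"])   # Windows-1252
print(UniversalDetector.LEGACY_MAP["gb2312"])  # GB18030
print(detect("café".encode("latin-1"), should_rename_legacy=True)["encoding"])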
def __init__(self, lang_filter=LanguageFilter.ALL): def __init__(
self._esc_charset_prober = None self,
self._charset_probers = [] lang_filter: LanguageFilter = LanguageFilter.ALL,
self.result = None should_rename_legacy: bool = False,
self.done = None ) -> None:
self._got_data = None self._esc_charset_prober: Optional[EscCharSetProber] = None
self._input_state = None self._utf1632_prober: Optional[UTF1632Prober] = None
self._last_char = None self._charset_probers: List[CharSetProber] = []
self.result: ResultDict = {
"encoding": None,
"confidence": 0.0,
"language": None,
}
self.done = False
self._got_data = False
self._input_state = InputState.PURE_ASCII
self._last_char = b""
self.lang_filter = lang_filter self.lang_filter = lang_filter
self.logger = logging.getLogger(__name__) self.logger = logging.getLogger(__name__)
self._has_win_bytes = None self._has_win_bytes = False
self.should_rename_legacy = should_rename_legacy
self.reset() self.reset()
def reset(self): @property
def input_state(self) -> int:
return self._input_state
@property
def has_win_bytes(self) -> bool:
return self._has_win_bytes
@property
def charset_probers(self) -> List[CharSetProber]:
return self._charset_probers
def reset(self) -> None:
""" """
Reset the UniversalDetector and all of its probers back to their Reset the UniversalDetector and all of its probers back to their
initial states. This is called by ``__init__``, so you only need to initial states. This is called by ``__init__``, so you only need to
call this directly in between analyses of different documents. call this directly in between analyses of different documents.
""" """
self.result = {'encoding': None, 'confidence': 0.0, 'language': None} self.result = {"encoding": None, "confidence": 0.0, "language": None}
self.done = False self.done = False
self._got_data = False self._got_data = False
self._has_win_bytes = False self._has_win_bytes = False
self._input_state = InputState.PURE_ASCII self._input_state = InputState.PURE_ASCII
self._last_char = b'' self._last_char = b""
if self._esc_charset_prober: if self._esc_charset_prober:
self._esc_charset_prober.reset() self._esc_charset_prober.reset()
if self._utf1632_prober:
self._utf1632_prober.reset()
for prober in self._charset_probers: for prober in self._charset_probers:
prober.reset() prober.reset()
def feed(self, byte_str): def feed(self, byte_str: Union[bytes, bytearray]) -> None:
""" """
Takes a chunk of a document and feeds it through all of the relevant Takes a chunk of a document and feeds it through all of the relevant
charset probers. charset probers.
@ -125,7 +168,7 @@ class UniversalDetector(object):
if self.done: if self.done:
return return
if not len(byte_str): if not byte_str:
return return
if not isinstance(byte_str, bytearray): if not isinstance(byte_str, bytearray):
@ -136,35 +179,38 @@ class UniversalDetector(object):
# If the data starts with BOM, we know it is UTF # If the data starts with BOM, we know it is UTF
if byte_str.startswith(codecs.BOM_UTF8): if byte_str.startswith(codecs.BOM_UTF8):
# EF BB BF UTF-8 with BOM # EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", self.result = {
'confidence': 1.0, "encoding": "UTF-8-SIG",
'language': ''} "confidence": 1.0,
elif byte_str.startswith((codecs.BOM_UTF32_LE, "language": "",
codecs.BOM_UTF32_BE)): }
elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
# FF FE 00 00 UTF-32, little-endian BOM # FF FE 00 00 UTF-32, little-endian BOM
# 00 00 FE FF UTF-32, big-endian BOM # 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32", self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""}
'confidence': 1.0, elif byte_str.startswith(b"\xFE\xFF\x00\x00"):
'language': ''}
elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
# FE FF 00 00 UCS-4, unusual octet order BOM (3412) # FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {'encoding': "X-ISO-10646-UCS-4-3412", self.result = {
'confidence': 1.0, # TODO: This encoding is not supported by Python. Should remove?
'language': ''} "encoding": "X-ISO-10646-UCS-4-3412",
elif byte_str.startswith(b'\x00\x00\xFF\xFE'): "confidence": 1.0,
"language": "",
}
elif byte_str.startswith(b"\x00\x00\xFF\xFE"):
# 00 00 FF FE UCS-4, unusual octet order BOM (2143) # 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {'encoding': "X-ISO-10646-UCS-4-2143", self.result = {
'confidence': 1.0, # TODO: This encoding is not supported by Python. Should remove?
'language': ''} "encoding": "X-ISO-10646-UCS-4-2143",
"confidence": 1.0,
"language": "",
}
elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)): elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
# FF FE UTF-16, little endian BOM # FF FE UTF-16, little endian BOM
# FE FF UTF-16, big endian BOM # FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16", self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""}
'confidence': 1.0,
'language': ''}
self._got_data = True self._got_data = True
if self.result['encoding'] is not None: if self.result["encoding"] is not None:
self.done = True self.done = True
return return
@ -173,12 +219,29 @@ class UniversalDetector(object):
if self._input_state == InputState.PURE_ASCII: if self._input_state == InputState.PURE_ASCII:
if self.HIGH_BYTE_DETECTOR.search(byte_str): if self.HIGH_BYTE_DETECTOR.search(byte_str):
self._input_state = InputState.HIGH_BYTE self._input_state = InputState.HIGH_BYTE
elif self._input_state == InputState.PURE_ASCII and \ elif (
self.ESC_DETECTOR.search(self._last_char + byte_str): self._input_state == InputState.PURE_ASCII
and self.ESC_DETECTOR.search(self._last_char + byte_str)
):
self._input_state = InputState.ESC_ASCII self._input_state = InputState.ESC_ASCII
self._last_char = byte_str[-1:] self._last_char = byte_str[-1:]
# next we will look to see if it appears to be either a UTF-16 or
# UTF-32 encoding
if not self._utf1632_prober:
self._utf1632_prober = UTF1632Prober()
if self._utf1632_prober.state == ProbingState.DETECTING:
if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {
"encoding": self._utf1632_prober.charset_name,
"confidence": self._utf1632_prober.get_confidence(),
"language": "",
}
self.done = True
return
# If we've seen escape sequences, use the EscCharSetProber, which # If we've seen escape sequences, use the EscCharSetProber, which
# uses a simple state machine to check for known escape sequences in # uses a simple state machine to check for known escape sequences in
# HZ and ISO-2022 encodings, since those are the only encodings that # HZ and ISO-2022 encodings, since those are the only encodings that
@ -187,12 +250,11 @@ class UniversalDetector(object):
if not self._esc_charset_prober: if not self._esc_charset_prober:
self._esc_charset_prober = EscCharSetProber(self.lang_filter) self._esc_charset_prober = EscCharSetProber(self.lang_filter)
if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT: if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding': self.result = {
self._esc_charset_prober.charset_name, "encoding": self._esc_charset_prober.charset_name,
-                               'confidence':
-                               self._esc_charset_prober.get_confidence(),
-                               'language':
-                               self._esc_charset_prober.language}
+                    "confidence": self._esc_charset_prober.get_confidence(),
+                    "language": self._esc_charset_prober.language,
+                }
                 self.done = True

         # If we've seen high bytes (i.e., those with values greater than 127),
         # we need to do more complicated checks using all our multi-byte and
@@ -207,17 +269,20 @@ class UniversalDetector(object):
             if self.lang_filter & LanguageFilter.NON_CJK:
                 self._charset_probers.append(SBCSGroupProber())
             self._charset_probers.append(Latin1Prober())
+            self._charset_probers.append(MacRomanProber())

         for prober in self._charset_probers:
             if prober.feed(byte_str) == ProbingState.FOUND_IT:
-                self.result = {'encoding': prober.charset_name,
-                               'confidence': prober.get_confidence(),
-                               'language': prober.language}
+                self.result = {
+                    "encoding": prober.charset_name,
+                    "confidence": prober.get_confidence(),
+                    "language": prober.language,
+                }
                 self.done = True
                 break

         if self.WIN_BYTE_DETECTOR.search(byte_str):
             self._has_win_bytes = True

-    def close(self):
+    def close(self) -> ResultDict:
         """
         Stop analyzing the current document and come up with a final
         prediction.
@@ -231,13 +296,11 @@ class UniversalDetector(object):
         self.done = True

         if not self._got_data:
-            self.logger.debug('no data received!')
+            self.logger.debug("no data received!")

         # Default to ASCII if it is all we've seen so far
         elif self._input_state == InputState.PURE_ASCII:
-            self.result = {'encoding': 'ascii',
-                           'confidence': 1.0,
-                           'language': ''}
+            self.result = {"encoding": "ascii", "confidence": 1.0, "language": ""}

         # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
         elif self._input_state == InputState.HIGH_BYTE:
@@ -253,34 +316,47 @@ class UniversalDetector(object):
                     max_prober = prober
             if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
                 charset_name = max_prober.charset_name
-                lower_charset_name = max_prober.charset_name.lower()
+                assert charset_name is not None
+                lower_charset_name = charset_name.lower()
                 confidence = max_prober.get_confidence()
                 # Use Windows encoding name instead of ISO-8859 if we saw any
                 # extra Windows-specific bytes
-                if lower_charset_name.startswith('iso-8859'):
+                if lower_charset_name.startswith("iso-8859"):
                     if self._has_win_bytes:
-                        charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
-                                                            charset_name)
-                self.result = {'encoding': charset_name,
-                               'confidence': confidence,
-                               'language': max_prober.language}
+                        charset_name = self.ISO_WIN_MAP.get(
+                            lower_charset_name, charset_name
+                        )
+                # Rename legacy encodings with superset encodings if asked
+                if self.should_rename_legacy:
+                    charset_name = self.LEGACY_MAP.get(
+                        (charset_name or "").lower(), charset_name
+                    )
+                self.result = {
+                    "encoding": charset_name,
+                    "confidence": confidence,
+                    "language": max_prober.language,
+                }

         # Log all prober confidences if none met MINIMUM_THRESHOLD
         if self.logger.getEffectiveLevel() <= logging.DEBUG:
-            if self.result['encoding'] is None:
-                self.logger.debug('no probers hit minimum threshold')
+            if self.result["encoding"] is None:
+                self.logger.debug("no probers hit minimum threshold")
                 for group_prober in self._charset_probers:
                     if not group_prober:
                         continue
                     if isinstance(group_prober, CharSetGroupProber):
                         for prober in group_prober.probers:
-                            self.logger.debug('%s %s confidence = %s',
-                                              prober.charset_name,
-                                              prober.language,
-                                              prober.get_confidence())
+                            self.logger.debug(
+                                "%s %s confidence = %s",
+                                prober.charset_name,
+                                prober.language,
+                                prober.get_confidence(),
+                            )
                     else:
-                        self.logger.debug('%s %s confidence = %s',
-                                          prober.charset_name,
-                                          prober.language,
-                                          prober.get_confidence())
+                        self.logger.debug(
+                            "%s %s confidence = %s",
+                            group_prober.charset_name,
+                            group_prober.language,
+                            group_prober.get_confidence(),
+                        )

         return self.result
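
For orientation, a minimal usage sketch of the updated API (the sample bytes are illustrative and not part of this commit; the import assumes the package is importable as chardet): should_rename_legacy is threaded from detect() through UniversalDetector into the close() logic above, where LEGACY_MAP rewrites a legacy guess into its modern superset.

import chardet

# Hypothetical input with Latin-style high bytes.
sample = "voil\xe0, d\xe9j\xe0 vu".encode("latin-1")

print(chardet.detect(sample))
# Same call, but a legacy guess such as ISO-8859-1 is looked up in
# LEGACY_MAP and reported as its superset. The exact guess depends on
# the probers and the input, so treat this as a sketch only.
print(chardet.detect(sample, should_rename_legacy=True))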

View file

@@ -0,0 +1,225 @@
######################## BEGIN LICENSE BLOCK ########################
#
# Contributor(s):
# Jason Zavaglia
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from typing import List, Union
from .charsetprober import CharSetProber
from .enums import ProbingState
class UTF1632Prober(CharSetProber):
"""
    This class simply looks for occurrences of zero bytes, and infers
    whether the file is UTF-16 or UTF-32 (little-endian or big-endian).
    For instance, files looking like ( \0 \0 \0 [nonzero] )+
    are likely to be UTF-32BE. Files looking like ( \0 [nonzero] )+
    may be guessed to be UTF-16BE, and inversely for little-endian varieties.
"""
    # how many logical characters to scan before committing to a prediction
    MIN_CHARS_FOR_DETECTION = 20
    # the minimum fraction of zero (or non-zero) bytes expected at a given
    # position mod 4 for that position to be considered characteristic
    EXPECTED_RATIO = 0.94
def __init__(self) -> None:
super().__init__()
self.position = 0
self.zeros_at_mod = [0] * 4
self.nonzeros_at_mod = [0] * 4
self._state = ProbingState.DETECTING
self.quad = [0, 0, 0, 0]
self.invalid_utf16be = False
self.invalid_utf16le = False
self.invalid_utf32be = False
self.invalid_utf32le = False
self.first_half_surrogate_pair_detected_16be = False
self.first_half_surrogate_pair_detected_16le = False
self.reset()
def reset(self) -> None:
super().reset()
self.position = 0
self.zeros_at_mod = [0] * 4
self.nonzeros_at_mod = [0] * 4
self._state = ProbingState.DETECTING
self.invalid_utf16be = False
self.invalid_utf16le = False
self.invalid_utf32be = False
self.invalid_utf32le = False
self.first_half_surrogate_pair_detected_16be = False
self.first_half_surrogate_pair_detected_16le = False
self.quad = [0, 0, 0, 0]
@property
def charset_name(self) -> str:
if self.is_likely_utf32be():
return "utf-32be"
if self.is_likely_utf32le():
return "utf-32le"
if self.is_likely_utf16be():
return "utf-16be"
if self.is_likely_utf16le():
return "utf-16le"
# default to something valid
return "utf-16"
@property
def language(self) -> str:
return ""
def approx_32bit_chars(self) -> float:
return max(1.0, self.position / 4.0)
def approx_16bit_chars(self) -> float:
return max(1.0, self.position / 2.0)
def is_likely_utf32be(self) -> bool:
approx_chars = self.approx_32bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
and not self.invalid_utf32be
)
def is_likely_utf32le(self) -> bool:
approx_chars = self.approx_32bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
and not self.invalid_utf32le
)
def is_likely_utf16be(self) -> bool:
approx_chars = self.approx_16bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
(self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
> self.EXPECTED_RATIO
and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
> self.EXPECTED_RATIO
and not self.invalid_utf16be
)
def is_likely_utf16le(self) -> bool:
approx_chars = self.approx_16bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
(self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
> self.EXPECTED_RATIO
and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
> self.EXPECTED_RATIO
and not self.invalid_utf16le
)
def validate_utf32_characters(self, quad: List[int]) -> None:
"""
Validate if the quad of bytes is valid UTF-32.
UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
excluding 0x0000D800 - 0x0000DFFF
https://en.wikipedia.org/wiki/UTF-32
"""
if (
quad[0] != 0
or quad[1] > 0x10
or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
):
self.invalid_utf32be = True
if (
quad[3] != 0
or quad[2] > 0x10
or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
):
self.invalid_utf32le = True
def validate_utf16_characters(self, pair: List[int]) -> None:
"""
Validate if the pair of bytes is valid UTF-16.
        UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xDFFF
with an exception for surrogate pairs, which must be in the range
0xD800-0xDBFF followed by 0xDC00-0xDFFF
https://en.wikipedia.org/wiki/UTF-16
"""
if not self.first_half_surrogate_pair_detected_16be:
if 0xD8 <= pair[0] <= 0xDB:
self.first_half_surrogate_pair_detected_16be = True
elif 0xDC <= pair[0] <= 0xDF:
self.invalid_utf16be = True
else:
if 0xDC <= pair[0] <= 0xDF:
self.first_half_surrogate_pair_detected_16be = False
else:
self.invalid_utf16be = True
if not self.first_half_surrogate_pair_detected_16le:
if 0xD8 <= pair[1] <= 0xDB:
self.first_half_surrogate_pair_detected_16le = True
elif 0xDC <= pair[1] <= 0xDF:
self.invalid_utf16le = True
else:
if 0xDC <= pair[1] <= 0xDF:
self.first_half_surrogate_pair_detected_16le = False
else:
self.invalid_utf16le = True
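    # For example (illustrative), U+1F600 in UTF-16BE is D8 3D DE 00: the
    # 0xD8 high byte flags a first surrogate half and the following 0xDE
    # (in 0xDC-0xDF) clears it; a 0xDC-0xDF high byte with no pending first
    # half would mark the stream as invalid UTF-16BE instead.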
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
for c in byte_str:
mod4 = self.position % 4
self.quad[mod4] = c
if mod4 == 3:
self.validate_utf32_characters(self.quad)
self.validate_utf16_characters(self.quad[0:2])
self.validate_utf16_characters(self.quad[2:4])
if c == 0:
self.zeros_at_mod[mod4] += 1
else:
self.nonzeros_at_mod[mod4] += 1
self.position += 1
return self.state
@property
def state(self) -> ProbingState:
if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
# terminal, decided states
return self._state
if self.get_confidence() > 0.80:
self._state = ProbingState.FOUND_IT
elif self.position > 4 * 1024:
# if we get to 4kb into the file, and we can't conclude it's UTF,
# let's give up
self._state = ProbingState.NOT_ME
return self._state
def get_confidence(self) -> float:
return (
0.85
if (
self.is_likely_utf16le()
or self.is_likely_utf16be()
or self.is_likely_utf32le()
or self.is_likely_utf32be()
)
else 0.00
)
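
A quick sanity check of the prober (an illustrative sketch, not part of the commit; the import path assumes chardet is importable as installed): a run of ASCII text encoded as UTF-32BE puts zeros at positions 0-2 of every quad and a non-zero byte at position 3, clearing the EXPECTED_RATIO bar once MIN_CHARS_FOR_DETECTION characters have been seen.

from chardet.utf1632prober import UTF1632Prober

prober = UTF1632Prober()
# 28 ASCII characters -> 112 bytes; every quad looks like 00 00 00 xx.
prober.feed("this is plain ascii text, ok".encode("utf-32-be"))
print(prober.charset_name)      # utf-32be
print(prober.get_confidence())  # 0.85 once the pattern is established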

View file

@@ -25,45 +25,46 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################

-from .charsetprober import CharSetProber
-from .enums import ProbingState, MachineState
-from .codingstatemachine import CodingStateMachine
-from .mbcssm import UTF8_SM_MODEL
+from typing import Union
+
+from .charsetprober import CharSetProber
+from .codingstatemachine import CodingStateMachine
+from .enums import MachineState, ProbingState
+from .mbcssm import UTF8_SM_MODEL


 class UTF8Prober(CharSetProber):
     ONE_CHAR_PROB = 0.5

-    def __init__(self):
-        super(UTF8Prober, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
-        self._num_mb_chars = None
+        self._num_mb_chars = 0
         self.reset()

-    def reset(self):
-        super(UTF8Prober, self).reset()
+    def reset(self) -> None:
+        super().reset()
         self.coding_sm.reset()
         self._num_mb_chars = 0

     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "utf-8"

     @property
-    def language(self):
+    def language(self) -> str:
         return ""

-    def feed(self, byte_str):
+    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
         for c in byte_str:
             coding_state = self.coding_sm.next_state(c)
             if coding_state == MachineState.ERROR:
                 self._state = ProbingState.NOT_ME
                 break
-            elif coding_state == MachineState.ITS_ME:
+            if coding_state == MachineState.ITS_ME:
                 self._state = ProbingState.FOUND_IT
                 break
-            elif coding_state == MachineState.START:
+            if coding_state == MachineState.START:
                 if self.coding_sm.get_current_charlen() >= 2:
                     self._num_mb_chars += 1
@@ -73,10 +74,9 @@ class UTF8Prober(CharSetProber):
         return self.state

-    def get_confidence(self):
+    def get_confidence(self) -> float:
         unlike = 0.99
         if self._num_mb_chars < 6:
             unlike *= self.ONE_CHAR_PROB**self._num_mb_chars
             return 1.0 - unlike
-        else:
-            return unlike
+        return unlike

View file

@ -1,9 +1,9 @@
""" """
This module exists only to simplify retrieving the version number of chardet This module exists only to simplify retrieving the version number of chardet
from within setup.py and from chardet subpackages. from within setuptools and from chardet subpackages.
:author: Dan Blanchard (dan.blanchard@gmail.com) :author: Dan Blanchard (dan.blanchard@gmail.com)
""" """
__version__ = "4.0.0" __version__ = "5.1.0"
VERSION = __version__.split('.') VERSION = __version__.split(".")