Mirror of https://github.com/SickGear/SickGear.git (synced 2024-12-02 17:33:37 +00:00)

Commit 3d546816cf: Merge branch 'feature/ChangeReqs' into develop

10 changed files with 10775 additions and 3 deletions

@@ -9,10 +9,11 @@
 * Update PySocks 1.6.8 (524ceb4) to 1.6.8 (b687a34)
 * Update Requests library 2.15.1 (282b01a) to 2.19.1 (33b41c7)
 * Update scandir module 1.6 (c3592ee) to 1.9.0 (9ab3d1f)
-* Add urllib3 release 1.23 (7c216f4)
-* Change if old scandir binary module is installed, fallback to slow Python module and inform user to upgrade binary
 * Update SimpleJSON 3.13.2 (6ffddbe) to 3.16.0 (e2a54f7)
 * Update unidecode module 1.0.22 (81f938d) to 1.0.22 (578cdb9)
+* Add idna library 2.7 (0f50bdc)
+* Add urllib3 release 1.23 (7c216f4)
+* Change if old scandir binary module is installed, fallback to slow Python module and inform user to upgrade binary
 * Change site services tester to fallback to http if error with SSL
 * Change try to use folder name when filename does not contain show name
 * Change force redirects in TVMaze API to be https

@@ -20,6 +21,7 @@
 * Add search results sort by Z to A
 * Add search results sort by newest aired
 * Add search results sort by oldest aired
+* Change requirements.txt Cheetah >= 3.1.0


 [develop changelog]

lib/idna/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@

from .package_data import __version__
from .core import *

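Because `__init__.py` re-exports the core API, the bundled package can be exercised directly once the repository's lib/ directory is importable. A minimal usage sketch; the sample domain and the import path are illustrative, not part of this commit:

import idna  # assumes lib/ is on sys.path, as SickGear arranges for bundled libraries

print(idna.encode(u'ドメイン.テスト'))           # b'xn--eckwd4c7c.xn--zckzah'
print(idna.decode(u'xn--eckwd4c7c.xn--zckzah'))  # ドメイン.テスト
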
lib/idna/codec.py (new file, 118 lines)
@@ -0,0 +1,118 @@

from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re

_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')


class Codec(codecs.Codec):

    def encode(self, data, errors='strict'):

        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return "", 0

        return encode(data), len(data)

    def decode(self, data, errors='strict'):

        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return u"", 0

        return decode(data), len(data)


class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
    def _buffer_encode(self, data, errors, final):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return ("", 0)

        labels = _unicode_dots_re.split(data)
        trailing_dot = u''
        if labels:
            if not labels[-1]:
                trailing_dot = '.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = '.'

        result = []
        size = 0
        for label in labels:
            result.append(alabel(label))
            if size:
                size += 1
            size += len(label)

        # Join with U+002E
        result = ".".join(result) + trailing_dot
        size += len(trailing_dot)
        return (result, size)


class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    def _buffer_decode(self, data, errors, final):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return (u"", 0)

        # IDNA allows decoding to operate on Unicode strings, too.
        if isinstance(data, unicode):
            labels = _unicode_dots_re.split(data)
        else:
            # Must be ASCII string
            data = str(data)
            unicode(data, "ascii")
            labels = data.split(".")

        trailing_dot = u''
        if labels:
            if not labels[-1]:
                trailing_dot = u'.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = u'.'

        result = []
        size = 0
        for label in labels:
            result.append(ulabel(label))
            if size:
                size += 1
            size += len(label)

        result = u".".join(result) + trailing_dot
        size += len(trailing_dot)
        return (result, size)


class StreamWriter(Codec, codecs.StreamWriter):
    pass


class StreamReader(Codec, codecs.StreamReader):
    pass


def getregentry():
    return codecs.CodecInfo(
        name='idna',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )

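The hunk above exposes the codec through getregentry(); one way to make it reachable via the standard str.encode()/bytes.decode() interface is to register it by hand. A minimal sketch, assuming a hand-rolled search function and the made-up codec name 'idna2008' (neither is defined by this module):

import codecs
from idna import codec as idna_codec  # import path assumed (lib/ on sys.path)

def _search(name):
    # Hypothetical search function; the codec name 'idna2008' is illustrative.
    return idna_codec.getregentry() if name == 'idna2008' else None

codecs.register(_search)
print(u'ドメイン.テスト'.encode('idna2008'))            # b'xn--eckwd4c7c.xn--zckzah'
print(b'xn--eckwd4c7c.xn--zckzah'.decode('idna2008'))  # ドメイン.テスト
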
lib/idna/compat.py (new file, 12 lines)
@@ -0,0 +1,12 @@

from .core import *
from .codec import *


def ToASCII(label):
    return encode(label)


def ToUnicode(label):
    return decode(label)


def nameprep(s):
    raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")

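These shims reuse the ToASCII/ToUnicode naming of the older stdlib IDNA 2003 interface while delegating to the IDNA 2008 core above. A quick sketch; the label value is an example only:

from idna.compat import ToASCII, ToUnicode  # import path assumed (lib/ on sys.path)

print(ToASCII(u'ドメイン'))         # b'xn--eckwd4c7c'
print(ToUnicode(b'xn--eckwd4c7c'))  # ドメイン
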
lib/idna/core.py (new file, 399 lines)
@@ -0,0 +1,399 @@

from . import idnadata
import bisect
import unicodedata
import re
import sys
from .intranges import intranges_contain

_virama_combining_class = 9
_alabel_prefix = b'xn--'
_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')

if sys.version_info[0] == 3:
    unicode = str
    unichr = chr


class IDNAError(UnicodeError):
    """ Base exception for all IDNA-encoding related problems """
    pass


class IDNABidiError(IDNAError):
    """ Exception when bidirectional requirements are not satisfied """
    pass


class InvalidCodepoint(IDNAError):
    """ Exception when a disallowed or unallocated codepoint is used """
    pass


class InvalidCodepointContext(IDNAError):
    """ Exception when the codepoint is not valid in the context it is used """
    pass


def _combining_class(cp):
    v = unicodedata.combining(unichr(cp))
    if v == 0:
        if not unicodedata.name(unichr(cp)):
            raise ValueError("Unknown character in unicodedata")
    return v


def _is_script(cp, script):
    return intranges_contain(ord(cp), idnadata.scripts[script])


def _punycode(s):
    return s.encode('punycode')


def _unot(s):
    return 'U+{0:04X}'.format(s)


def valid_label_length(label):

    if len(label) > 63:
        return False
    return True


def valid_string_length(label, trailing_dot):

    if len(label) > (254 if trailing_dot else 253):
        return False
    return True


def check_bidi(label, check_ltr=False):

    # Bidi rules should only be applied if string contains RTL characters
    bidi_label = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)
        if direction == '':
            # String likely comes from a newer version of Unicode
            raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx))
        if direction in ['R', 'AL', 'AN']:
            bidi_label = True
    if not bidi_label and not check_ltr:
        return True

    # Bidi rule 1
    direction = unicodedata.bidirectional(label[0])
    if direction in ['R', 'AL']:
        rtl = True
    elif direction == 'L':
        rtl = False
    else:
        raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label)))

    valid_ending = False
    number_type = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)

        if rtl:
            # Bidi rule 2
            if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx))
            # Bidi rule 3
            if direction in ['R', 'AL', 'EN', 'AN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False
            # Bidi rule 4
            if direction in ['AN', 'EN']:
                if not number_type:
                    number_type = direction
                else:
                    if number_type != direction:
                        raise IDNABidiError('Can not mix numeral types in a right-to-left label')
        else:
            # Bidi rule 5
            if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx))
            # Bidi rule 6
            if direction in ['L', 'EN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False

    if not valid_ending:
        raise IDNABidiError('Label ends with illegal codepoint directionality')

    return True


def check_initial_combiner(label):

    if unicodedata.category(label[0])[0] == 'M':
        raise IDNAError('Label begins with an illegal combining character')
    return True


def check_hyphen_ok(label):

    if label[2:4] == '--':
        raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
    if label[0] == '-' or label[-1] == '-':
        raise IDNAError('Label must not start or end with a hyphen')
    return True


def check_nfc(label):

    if unicodedata.normalize('NFC', label) != label:
        raise IDNAError('Label must be in Normalization Form C')


def valid_contextj(label, pos):

    cp_value = ord(label[pos])

    if cp_value == 0x200c:

        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True

        ok = False
        for i in range(pos-1, -1, -1):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('L'), ord('D')]:
                ok = True
                break

        if not ok:
            return False

        ok = False
        for i in range(pos+1, len(label)):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('R'), ord('D')]:
                ok = True
                break
        return ok

    if cp_value == 0x200d:

        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True
        return False

    else:

        return False


def valid_contexto(label, pos, exception=False):

    cp_value = ord(label[pos])

    if cp_value == 0x00b7:
        if 0 < pos < len(label)-1:
            if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
                return True
        return False

    elif cp_value == 0x0375:
        if pos < len(label)-1 and len(label) > 1:
            return _is_script(label[pos + 1], 'Greek')
        return False

    elif cp_value == 0x05f3 or cp_value == 0x05f4:
        if pos > 0:
            return _is_script(label[pos - 1], 'Hebrew')
        return False

    elif cp_value == 0x30fb:
        for cp in label:
            if cp == u'\u30fb':
                continue
            if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
                return True
        return False

    elif 0x660 <= cp_value <= 0x669:
        for cp in label:
            if 0x6f0 <= ord(cp) <= 0x06f9:
                return False
        return True

    elif 0x6f0 <= cp_value <= 0x6f9:
        for cp in label:
            if 0x660 <= ord(cp) <= 0x0669:
                return False
        return True


def check_label(label):

    if isinstance(label, (bytes, bytearray)):
        label = label.decode('utf-8')
    if len(label) == 0:
        raise IDNAError('Empty Label')

    check_nfc(label)
    check_hyphen_ok(label)
    check_initial_combiner(label)

    for (pos, cp) in enumerate(label):
        cp_value = ord(cp)
        if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
            continue
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
            try:
                if not valid_contextj(label, pos):
                    raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format(
                        _unot(cp_value), pos+1, repr(label)))
            except ValueError:
                raise IDNAError('Unknown codepoint adjacent to joiner {0} at position {1} in {2}'.format(
                    _unot(cp_value), pos+1, repr(label)))
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
            if not valid_contexto(label, pos):
                raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label)))
        else:
            raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))

    check_bidi(label)


def alabel(label):

    try:
        label = label.encode('ascii')
        try:
            ulabel(label)
        except IDNAError:
            raise IDNAError('The label {0} is not a valid A-label'.format(label))
        if not valid_label_length(label):
            raise IDNAError('Label too long')
        return label
    except UnicodeEncodeError:
        pass

    if not label:
        raise IDNAError('No Input')

    label = unicode(label)
    check_label(label)
    label = _punycode(label)
    label = _alabel_prefix + label

    if not valid_label_length(label):
        raise IDNAError('Label too long')

    return label


def ulabel(label):

    if not isinstance(label, (bytes, bytearray)):
        try:
            label = label.encode('ascii')
        except UnicodeEncodeError:
            check_label(label)
            return label

    label = label.lower()
    if label.startswith(_alabel_prefix):
        label = label[len(_alabel_prefix):]
    else:
        check_label(label)
        return label.decode('ascii')

    label = label.decode('punycode')
    check_label(label)
    return label


def uts46_remap(domain, std3_rules=True, transitional=False):
    """Re-map the characters in the string according to UTS46 processing."""
    from .uts46data import uts46data
    output = u""
    try:
        for pos, char in enumerate(domain):
            code_point = ord(char)
            uts46row = uts46data[code_point if code_point < 256 else
                bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
            status = uts46row[1]
            replacement = uts46row[2] if len(uts46row) == 3 else None
            if (status == "V" or
                    (status == "D" and not transitional) or
                    (status == "3" and not std3_rules and replacement is None)):
                output += char
            elif replacement is not None and (status == "M" or
                    (status == "3" and not std3_rules) or
                    (status == "D" and transitional)):
                output += replacement
            elif status != "I":
                raise IndexError()
        return unicodedata.normalize("NFC", output)
    except IndexError:
        raise InvalidCodepoint(
            "Codepoint {0} not allowed at position {1} in {2}".format(
                _unot(code_point), pos + 1, repr(domain)))


def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):

    if isinstance(s, (bytes, bytearray)):
        s = s.decode("ascii")
    if uts46:
        s = uts46_remap(s, std3_rules, transitional)
    trailing_dot = False
    result = []
    if strict:
        labels = s.split('.')
    else:
        labels = _unicode_dots_re.split(s)
    if not labels or labels == ['']:
        raise IDNAError('Empty domain')
    if labels[-1] == '':
        del labels[-1]
        trailing_dot = True
    for label in labels:
        s = alabel(label)
        if s:
            result.append(s)
        else:
            raise IDNAError('Empty label')
    if trailing_dot:
        result.append(b'')
    s = b'.'.join(result)
    if not valid_string_length(s, trailing_dot):
        raise IDNAError('Domain too long')
    return s


def decode(s, strict=False, uts46=False, std3_rules=False):

    if isinstance(s, (bytes, bytearray)):
        s = s.decode("ascii")
    if uts46:
        s = uts46_remap(s, std3_rules, False)
    trailing_dot = False
    result = []
    if not strict:
        labels = _unicode_dots_re.split(s)
    else:
        labels = s.split(u'.')
    if not labels or labels == ['']:
        raise IDNAError('Empty domain')
    if not labels[-1]:
        del labels[-1]
        trailing_dot = True
    for label in labels:
        s = ulabel(label)
        if s:
            result.append(s)
        else:
            raise IDNAError('Empty label')
    if trailing_dot:
        result.append(u'')
    return u'.'.join(result)

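For reference, a short sketch of the two public entry points defined above; the domain strings are examples only, not taken from this commit:

from idna.core import encode, decode, IDNAError  # import path assumed (lib/ on sys.path)

print(encode(u'ドメイン.テスト'))            # b'xn--eckwd4c7c.xn--zckzah'
print(decode(b'xn--eckwd4c7c.xn--zckzah'))   # ドメイン.テスト
print(encode(u'König.example', uts46=True))  # b'xn--knig-5qa.example' -- the UTS46 remap lower-cases 'K'
try:
    encode(u'König.example')                 # without the remap, uppercase 'K' is a disallowed codepoint
except IDNAError as exc:
    print(exc)
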
lib/idna/idnadata.py (new file, 1979 lines)
File diff suppressed because it is too large.

lib/idna/intranges.py (new file, 53 lines)
@@ -0,0 +1,53 @@

"""
Given a list of integers, made up of (hopefully) a small number of long runs
of consecutive integers, compute a representation of the form
((start1, end1), (start2, end2) ...). Then answer the question "was x present
in the original list?" in time O(log(# runs)).
"""

import bisect


def intranges_from_list(list_):
    """Represent a list of integers as a sequence of ranges:
    ((start_0, end_0), (start_1, end_1), ...), such that the original
    integers are exactly those x such that start_i <= x < end_i for some i.

    Ranges are encoded as single integers (start << 32 | end), not as tuples.
    """

    sorted_list = sorted(list_)
    ranges = []
    last_write = -1
    for i in range(len(sorted_list)):
        if i+1 < len(sorted_list):
            if sorted_list[i] == sorted_list[i+1]-1:
                continue
        current_range = sorted_list[last_write+1:i+1]
        ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
        last_write = i

    return tuple(ranges)


def _encode_range(start, end):
    return (start << 32) | end


def _decode_range(r):
    return (r >> 32), (r & ((1 << 32) - 1))


def intranges_contain(int_, ranges):
    """Determine if `int_` falls into one of the ranges in `ranges`."""
    tuple_ = _encode_range(int_, 0)
    pos = bisect.bisect_left(ranges, tuple_)
    # we could be immediately ahead of a tuple (start, end)
    # with start < int_ <= end
    if pos > 0:
        left, right = _decode_range(ranges[pos-1])
        if left <= int_ < right:
            return True
    # or we could be immediately behind a tuple (int_, end)
    if pos < len(ranges):
        left, _ = _decode_range(ranges[pos])
        if left == int_:
            return True
    return False

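A small sketch of how these helpers combine; the input values are chosen only for illustration:

from idna.intranges import intranges_from_list, intranges_contain  # import path assumed

ranges = intranges_from_list([1, 2, 3, 10, 11, 12, 40])
# Three runs, each packed as (start << 32) | end with an exclusive end: (1, 4), (10, 13), (40, 41)
print(intranges_contain(11, ranges))  # True  -- inside the 10..12 run
print(intranges_contain(5, ranges))   # False -- falls in the gap between runs
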
lib/idna/package_data.py (new file, 2 lines)
@@ -0,0 +1,2 @@

__version__ = '2.7'

lib/idna/uts46data.py (new file, 8205 lines)
File diff suppressed because it is too large.

requirements.txt
@@ -1 +1 @@
-Cheetah>=2.1.0
+Cheetah3>=3.1.0