#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import tornado.escape
from tornado.escape import utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape, to_unicode, json_decode, json_encode, squeeze, recursive_unicode
from tornado.util import u, unicode_type
from tornado.test.util import unittest
# Test cases for tornado.escape.linkify.
# Each entry is (input_text, linkify_kwargs, expected_html): the expected
# value is the HTML that linkify produces — plain text with URLs wrapped in
# <a href="..."> anchors and HTML-special characters entity-escaped.
linkify_tests = [
    ("hello http://world.com/!", {},
     u('hello <a href="http://world.com/">http://world.com/</a>!')),
    ("hello http://world.com/with?param=true&stuff=yes", {},
     u('hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>')),
    # an opened paren followed by many chars killed Gruber's regex
    ("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
     u('<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')),
    # as did too many dots at the end
    ("http://url.com/withmany.......................................", {},
     u('<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................')),
    ("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
     u('<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)')),
    # some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
    # plus a few extras (such as multiple parentheses).
    ("http://foo.com/blah_blah", {},
     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>')),
    ("http://foo.com/blah_blah/", {},
     u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>')),
    ("(Something like http://foo.com/blah_blah)", {},
     u('(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)')),
    ("http://foo.com/blah_blah_(wikipedia)", {},
     u('<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>')),
    ("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
     u('<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>')),
    ("(Something like http://foo.com/blah_blah_(wikipedia))", {},
     u('(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)')),
    # Trailing punctuation is not treated as part of the URL.
    ("http://foo.com/blah_blah.", {},
     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.')),
    ("http://foo.com/blah_blah/.", {},
     u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.')),
    ("<http://foo.com/blah_blah>", {},
     u('&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;')),
    ("<http://foo.com/blah_blah/>", {},
     u('&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;')),
    ("http://foo.com/blah_blah,", {},
     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,')),
    ("http://www.example.com/wpstyle/?p=364.", {},
     u('<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.')),
    # Non-http protocols are linked only when explicitly permitted.
    ("rdar://1234",
     {"permitted_protocols": ["http", "rdar"]},
     u('<a href="rdar://1234">rdar://1234</a>')),
    ("rdar:/1234",
     {"permitted_protocols": ["rdar"]},
     u('<a href="rdar:/1234">rdar:/1234</a>')),
    ("http://userid:password@example.com:8080", {},
     u('<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>')),
    ("http://userid@example.com", {},
     u('<a href="http://userid@example.com">http://userid@example.com</a>')),
    ("http://userid@example.com:8080", {},
     u('<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>')),
    ("http://userid:password@example.com", {},
     u('<a href="http://userid:password@example.com">http://userid:password@example.com</a>')),
    ("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
     {"permitted_protocols": ["http", "message"]},
     u('<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>')),
    # Non-ascii (internationalized) domain and path.
    (u("http://\u27a1.ws/\u4a39"), {},
     u('<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>')),
    # Pre-existing markup in the input is escaped, not interpreted.
    ("<tag>http://example.com</tag>", {},
     u('&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;')),
    ("Just a www.example.com link.", {},
     u('Just a <a href="http://www.example.com">www.example.com</a> link.')),
    ("Just a www.example.com link.",
     {"require_protocol": True},
     u('Just a www.example.com link.')),
    # shorten=True truncates the link text but keeps the full URL in
    # href and title attributes.
    ("A http://reallylong.com/link/that/exceedsthelenglimit.html",
     {"require_protocol": True, "shorten": True},
     u('A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>')),
    ("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
     {"shorten": True},
     u('A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!')),
    ("A file:///passwords.txt and http://web.com link", {},
     u('A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link')),
    ("A file:///passwords.txt and http://web.com link",
     {"permitted_protocols": ["file"]},
     u('A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link')),
    # extra_params may be a static string of attributes...
    ("www.external-link.com",
     {"extra_params": 'rel="nofollow" class="external"'},
     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
    # ...or a callable receiving the href; surrounding whitespace is stripped.
    ("www.external-link.com and www.internal-link.com/blogs extra",
     {"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra')),
    ("www.external-link.com",
     {"extra_params": lambda href: ' rel="nofollow" class="external" '},
     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
]
class EscapeTestCase(unittest.TestCase):
    """Tests for the escaping/unescaping helpers in ``tornado.escape``."""

    def test_linkify(self):
        # Each fixture is (plain_text, linkify_kwargs, expected_html).
        for text, kwargs, html in linkify_tests:
            linked = tornado.escape.linkify(text, **kwargs)
            self.assertEqual(linked, html)

    def test_xhtml_escape(self):
        # (unescaped, escaped) pairs: xhtml_escape must produce the escaped
        # form, and xhtml_unescape must round-trip it back.
        tests = [
            ("<foo>", "&lt;foo&gt;"),
            (u("<foo>"), u("&lt;foo&gt;")),
            (b"<foo>", b"&lt;foo&gt;"),
            # All five XML special characters are escaped.
            ("<>&\"'", "&lt;&gt;&amp;&quot;&#39;"),
            # Already-escaped text is escaped again, not passed through.
            ("&amp;", "&amp;amp;"),
            # Non-ascii characters pass through unmodified, whether given
            # as unicode or as utf-8 encoded bytes.
            (u("<\u00e9>"), u("&lt;\u00e9&gt;")),
            (b"<\xc3\xa9>", b"&lt;\xc3\xa9&gt;"),
        ]
        for unescaped, escaped in tests:
            self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
            self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))

    def test_url_escape_unicode(self):
        tests = [
            # byte strings are passed through as-is
            (u('\u00e9').encode('utf8'), '%C3%A9'),
            (u('\u00e9').encode('latin1'), '%E9'),
            # unicode strings become utf8
            (u('\u00e9'), '%C3%A9'),
        ]
        for unescaped, escaped in tests:
            self.assertEqual(url_escape(unescaped), escaped)

    def test_url_unescape_unicode(self):
        # (escaped, expected_unescaped, encoding); encoding=None returns bytes.
        tests = [
            ('%C3%A9', u('\u00e9'), 'utf8'),
            ('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
            ('%C3%A9', utf8(u('\u00e9')), None),
        ]
        for escaped, unescaped, encoding in tests:
            # input strings to url_unescape should only contain ascii
            # characters, but make sure the function accepts both byte
            # and unicode strings.
            self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
            self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)

    def test_url_escape_quote_plus(self):
        # plus=True (the default) uses '+' for spaces and escapes '+';
        # plus=False percent-encodes spaces instead.
        unescaped = '+ #%'
        plus_escaped = '%2B+%23%25'
        escaped = '%2B%20%23%25'
        self.assertEqual(url_escape(unescaped), plus_escaped)
        self.assertEqual(url_escape(unescaped, plus=False), escaped)
        self.assertEqual(url_unescape(plus_escaped), unescaped)
        self.assertEqual(url_unescape(escaped, plus=False), unescaped)
        self.assertEqual(url_unescape(plus_escaped, encoding=None),
                         utf8(unescaped))
        self.assertEqual(url_unescape(escaped, encoding=None, plus=False),
                         utf8(unescaped))

    def test_escape_return_types(self):
        # On python2 the escape methods should generally return the same
        # type as their argument
        self.assertEqual(type(xhtml_escape("foo")), str)
        self.assertEqual(type(xhtml_escape(u("foo"))), unicode_type)

    def test_json_decode(self):
        # json_decode accepts both bytes and unicode, but strings it returns
        # are always unicode.
        self.assertEqual(json_decode(b'"foo"'), u("foo"))
        self.assertEqual(json_decode(u('"foo"')), u("foo"))
        # Non-ascii bytes are interpreted as utf8
        self.assertEqual(json_decode(utf8(u('"\u00e9"'))), u("\u00e9"))

    def test_json_encode(self):
        # json deals with strings, not bytes. On python 2 byte strings will
        # convert automatically if they are utf8; on python 3 byte strings
        # are not allowed.
        self.assertEqual(json_decode(json_encode(u("\u00e9"))), u("\u00e9"))
        if bytes is str:
            self.assertEqual(json_decode(json_encode(utf8(u("\u00e9")))), u("\u00e9"))
            self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")

    def test_squeeze(self):
        # Runs of whitespace collapse to a single space.
        self.assertEqual(squeeze(u('sequences     of    whitespace   chars')),
                         u('sequences of whitespace chars'))

    def test_recursive_unicode(self):
        # Byte strings are decoded to unicode recursively inside the
        # standard container types.
        tests = {
            'dict': {b"foo": b"bar"},
            'list': [b"foo", b"bar"],
            'tuple': (b"foo", b"bar"),
            'bytes': b"foo"
        }
        self.assertEqual(recursive_unicode(tests['dict']), {u("foo"): u("bar")})
        self.assertEqual(recursive_unicode(tests['list']), [u("foo"), u("bar")])
        self.assertEqual(recursive_unicode(tests['tuple']), (u("foo"), u("bar")))
        self.assertEqual(recursive_unicode(tests['bytes']), u("foo"))