Merge pull request #491 from JackDandy/feature/UpdateTornado

Update Tornado Web Server 4.2 to 4.3.dev1 (1b6157d).
This commit is contained in:
JackDandy 2015-08-22 01:25:07 +01:00
commit f9f744b501
23 changed files with 775 additions and 277 deletions

View file

@ -21,11 +21,14 @@
* Add a postprocess folder name validation * Add a postprocess folder name validation
* Update Requests library to 2.7.0 (5d6d1bc) * Update Requests library to 2.7.0 (5d6d1bc)
* Update SimpleJSON library 3.7.3 to 3.8.0 (a37a9bd) * Update SimpleJSON library 3.7.3 to 3.8.0 (a37a9bd)
* Update Tornado Web Server 4.2 to 4.3.dev1 (1b6157d)
* Update change to suppress reporting of Tornado exception error 1 to updated package (ref:hacks.txt)
* Update fix for API response header for JSON content type and the return of JSONP data to updated package (ref:hacks.txt)
### 0.10.0 (2015-08-06 11:05:00 UTC) ### 0.10.0 (2015-08-06 11:05:00 UTC)
* Remove EZRSS provider * Remove EZRSS provider
* Update Tornado webserver to 4.2 (fdfaf3d) * Update Tornado Web Server to 4.2 (fdfaf3d)
* Update change to suppress reporting of Tornado exception error 1 to updated package (ref:hacks.txt) * Update change to suppress reporting of Tornado exception error 1 to updated package (ref:hacks.txt)
* Update fix for API response header for JSON content type and the return of JSONP data to updated package (ref:hacks.txt) * Update fix for API response header for JSON content type and the return of JSONP data to updated package (ref:hacks.txt)
* Update Requests library 2.6.2 to 2.7.0 (8b5e457) * Update Requests library 2.6.2 to 2.7.0 (8b5e457)
@ -141,7 +144,7 @@
### 0.9.0 (2015-05-18 14:33:00 UTC) ### 0.9.0 (2015-05-18 14:33:00 UTC)
* Update Tornado webserver to 4.2.dev1 (609dbb9) * Update Tornado Web Server to 4.2.dev1 (609dbb9)
* Update change to suppress reporting of Tornado exception error 1 to updated package as listed in hacks.txt * Update change to suppress reporting of Tornado exception error 1 to updated package as listed in hacks.txt
* Update fix for API response header for JSON content type and the return of JSONP data to updated package as listed in hacks.txt * Update fix for API response header for JSON content type and the return of JSONP data to updated package as listed in hacks.txt
* Change network names to only display on top line of Day by Day layout on Episode View * Change network names to only display on top line of Day by Day layout on Episode View
@ -655,7 +658,7 @@
* Add return code from hardlinking error to log * Add return code from hardlinking error to log
* Fix ABD regex for certain filenames * Fix ABD regex for certain filenames
* Change miscellaneous UI fixes * Change miscellaneous UI fixes
* Update Tornado webserver to 4.1dev1 and add the certifi lib dependency * Update Tornado Web Server to 4.1dev1 and add the certifi lib dependency
* Fix trending shows page from loading full size poster images * Fix trending shows page from loading full size poster images
* Add "Archive on first match" to Manage, Mass Update, Edit Selected page * Add "Archive on first match" to Manage, Mass Update, Edit Selected page
* Fix searching IPTorrentsProvider * Fix searching IPTorrentsProvider

View file

@ -25,5 +25,5 @@ from __future__ import absolute_import, division, print_function, with_statement
# is zero for an official release, positive for a development branch, # is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version # or negative for a release candidate or beta (after the base version
# number has been incremented) # number has been incremented)
version = "4.2" version = "4.3.dev1"
version_info = (4, 2, 0, 0) version_info = (4, 3, 0, -100)

92
tornado/_locale_data.py Normal file
View file

@ -0,0 +1,92 @@
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Data used by the tornado.locale module."""
# NOTE: This file is supposed to contain unicode strings, which is
# exactly what you'd get with e.g. u"Español" in most python versions.
# However, Python 3.2 doesn't support the u"" syntax, so we use a u()
# function instead. tornado.util.u cannot be used because it doesn't
# support non-ascii characters on python 2.
# When we drop support for Python 3.2, we can remove the parens
# and make these plain unicode strings.
from tornado.escape import to_unicode as u
# Mapping of locale code -> display names.
# ``name_en`` is the language's English name; ``name`` is the native
# (endonym) spelling, wrapped in u() for Python 2 compatibility.
# Fix: the English name for uk_UA was the truncated "Ukraini " —
# corrected to "Ukrainian" (matches the fix applied upstream in Tornado).
LOCALE_NAMES = {
    "af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
    "am_ET": {"name_en": u("Amharic"), "name": u("አማርኛ")},
    "ar_AR": {"name_en": u("Arabic"), "name": u("العربية")},
    "bg_BG": {"name_en": u("Bulgarian"), "name": u("Български")},
    "bn_IN": {"name_en": u("Bengali"), "name": u("বাংলা")},
    "bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
    "ca_ES": {"name_en": u("Catalan"), "name": u("Català")},
    "cs_CZ": {"name_en": u("Czech"), "name": u("Čeština")},
    "cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
    "da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
    "de_DE": {"name_en": u("German"), "name": u("Deutsch")},
    "el_GR": {"name_en": u("Greek"), "name": u("Ελληνικά")},
    "en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
    "en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
    "es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Español (España)")},
    "es_LA": {"name_en": u("Spanish"), "name": u("Español")},
    "et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
    "eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
    "fa_IR": {"name_en": u("Persian"), "name": u("فارسی")},
    "fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
    "fr_CA": {"name_en": u("French (Canada)"), "name": u("Français (Canada)")},
    "fr_FR": {"name_en": u("French"), "name": u("Français")},
    "ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
    "gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
    "he_IL": {"name_en": u("Hebrew"), "name": u("עברית")},
    "hi_IN": {"name_en": u("Hindi"), "name": u("हिन्दी")},
    "hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
    "hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
    "id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
    "is_IS": {"name_en": u("Icelandic"), "name": u("Íslenska")},
    "it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
    "ja_JP": {"name_en": u("Japanese"), "name": u("日本語")},
    "ko_KR": {"name_en": u("Korean"), "name": u("한국어")},
    "lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvių")},
    "lv_LV": {"name_en": u("Latvian"), "name": u("Latviešu")},
    "mk_MK": {"name_en": u("Macedonian"), "name": u("Македонски")},
    "ml_IN": {"name_en": u("Malayalam"), "name": u("മലയാളം")},
    "ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
    "nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokmål)")},
    "nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
    "nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
    "pa_IN": {"name_en": u("Punjabi"), "name": u("ਪੰਜਾਬੀ")},
    "pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
    "pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Português (Brasil)")},
    "pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Português (Portugal)")},
    "ro_RO": {"name_en": u("Romanian"), "name": u("Română")},
    "ru_RU": {"name_en": u("Russian"), "name": u("Русский")},
    "sk_SK": {"name_en": u("Slovak"), "name": u("Slovenčina")},
    "sl_SI": {"name_en": u("Slovenian"), "name": u("Slovenščina")},
    "sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
    "sr_RS": {"name_en": u("Serbian"), "name": u("Српски")},
    "sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
    "sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
    "ta_IN": {"name_en": u("Tamil"), "name": u("தமிழ்")},
    "te_IN": {"name_en": u("Telugu"), "name": u("తెలుగు")},
    "th_TH": {"name_en": u("Thai"), "name": u("ภาษาไทย")},
    "tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
    "tr_TR": {"name_en": u("Turkish"), "name": u("Türkçe")},
    "uk_UA": {"name_en": u("Ukrainian"), "name": u("Українська")},
    "vi_VN": {"name_en": u("Vietnamese"), "name": u("Tiếng Việt")},
    "zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("中文(简体)")},
    "zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("中文(繁體)")},
}

View file

@ -621,6 +621,72 @@ class OAuth2Mixin(object):
args.update(extra_params) args.update(extra_params)
return url_concat(url, args) return url_concat(url, args)
@_auth_return_future
def oauth2_request(self, url, callback, access_token=None,
                   post_args=None, **args):
    """Fetches the given URL using an OAuth2 access token.

    If the request is a POST, ``post_args`` should be provided. Query
    string arguments should be given as keyword arguments.

    Example usage:

    ..testcode::

        class MainHandler(tornado.web.RequestHandler,
                          tornado.auth.FacebookGraphMixin):
            @tornado.web.authenticated
            @tornado.gen.coroutine
            def get(self):
                new_entry = yield self.oauth2_request(
                    "https://graph.facebook.com/me/feed",
                    post_args={"message": "I am posting from my Tornado application!"},
                    access_token=self.current_user["access_token"])

                if not new_entry:
                    # Call failed; perhaps missing permission?
                    yield self.authorize_redirect()
                    return
                self.finish("Posted a message!")

    .. testoutput::
       :hide:

    .. versionadded:: 4.3
    """
    # Fold the access token (if any) into the query-string arguments.
    query_params = {}
    if access_token:
        query_params["access_token"] = access_token
    query_params.update(args)
    if query_params:
        url += "?" + urllib_parse.urlencode(query_params)
    # _on_oauth2_request resolves the future created by @_auth_return_future.
    handle_response = functools.partial(self._on_oauth2_request, callback)
    client = self.get_auth_http_client()
    if post_args is None:
        client.fetch(url, callback=handle_response)
    else:
        client.fetch(url, method="POST",
                     body=urllib_parse.urlencode(post_args),
                     callback=handle_response)
def _on_oauth2_request(self, future, response):
    # Resolve ``future`` from the completed HTTP ``response``: an error
    # becomes an AuthError; otherwise the JSON body is the result.
    if response.error:
        future.set_exception(AuthError(
            "Error response %s fetching %s" %
            (response.error, response.request.url)))
    else:
        future.set_result(escape.json_decode(response.body))
def get_auth_http_client(self):
    """Returns the `.AsyncHTTPClient` instance to be used for auth requests.

    May be overridden by subclasses to use an HTTP client other than
    the default.

    .. versionadded:: 4.3
    """
    # A fresh call returns the shared (per-IOLoop) client instance.
    return httpclient.AsyncHTTPClient()
class TwitterMixin(OAuthMixin): class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication. """Twitter OAuth authentication.
@ -791,12 +857,21 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
""" """
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth" _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token" _OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
_OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
_OAUTH_NO_CALLBACKS = False _OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = 'google_oauth' _OAUTH_SETTINGS_KEY = 'google_oauth'
@_auth_return_future @_auth_return_future
def get_authenticated_user(self, redirect_uri, code, callback): def get_authenticated_user(self, redirect_uri, code, callback):
"""Handles the login for the Google user, returning a user object. """Handles the login for the Google user, returning an access token.
The result is a dictionary containing an ``access_token`` field
([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)).
Unlike other ``get_authenticated_user`` methods in this package,
this method does not return any additional information about the user.
The returned access token can be used with `OAuth2Mixin.oauth2_request`
to request additional information (perhaps from
``https://www.googleapis.com/oauth2/v2/userinfo``)
Example usage: Example usage:
@ -807,10 +882,14 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
@tornado.gen.coroutine @tornado.gen.coroutine
def get(self): def get(self):
if self.get_argument('code', False): if self.get_argument('code', False):
user = yield self.get_authenticated_user( access = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google', redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code')) code=self.get_argument('code'))
# Save the user with e.g. set_secure_cookie user = yield self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=access["access_token"])
# Save the user and access token with
# e.g. set_secure_cookie.
else: else:
yield self.authorize_redirect( yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google', redirect_uri='http://your.site.com/auth/google',
@ -845,14 +924,6 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
args = escape.json_decode(response.body) args = escape.json_decode(response.body)
future.set_result(args) future.set_result(args)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class FacebookGraphMixin(OAuth2Mixin): class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2.""" """Facebook authentication using the new Graph API and OAuth2."""
@ -983,40 +1054,16 @@ class FacebookGraphMixin(OAuth2Mixin):
The given path is relative to ``self._FACEBOOK_BASE_URL``, The given path is relative to ``self._FACEBOOK_BASE_URL``,
by default "https://graph.facebook.com". by default "https://graph.facebook.com".
This method is a wrapper around `OAuth2Mixin.oauth2_request`;
the only difference is that this method takes a relative path,
while ``oauth2_request`` takes a complete url.
.. versionchanged:: 3.1 .. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``. Added the ability to override ``self._FACEBOOK_BASE_URL``.
""" """
url = self._FACEBOOK_BASE_URL + path url = self._FACEBOOK_BASE_URL + path
all_args = {} return self.oauth2_request(url, callback, access_token,
if access_token: post_args, **args)
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib_parse.urlencode(all_args)
callback = functools.partial(self._on_facebook_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_facebook_request(self, future, response):
if response.error:
future.set_exception(AuthError("Error response %s fetching %s" %
(response.error, response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
def _oauth_signature(consumer_token, method, url, parameters={}, token=None): def _oauth_signature(consumer_token, method, url, parameters={}, token=None):

View file

@ -289,11 +289,16 @@ def main():
runpy.run_module(module, run_name="__main__", alter_sys=True) runpy.run_module(module, run_name="__main__", alter_sys=True)
elif mode == "script": elif mode == "script":
with open(script) as f: with open(script) as f:
# Execute the script in our namespace instead of creating
# a new one so that something that tries to import __main__
# (e.g. the unittest module) will see names defined in the
# script instead of just those defined in this module.
global __file__ global __file__
__file__ = script __file__ = script
# Use globals as our "locals" dictionary so that # If __package__ is defined, imports may be incorrectly
# something that tries to import __main__ (e.g. the unittest # interpreted as relative to this module.
# module) will see the right things. global __package__
del __package__
exec_in(f.read(), globals(), globals()) exec_in(f.read(), globals(), globals())
except SystemExit as e: except SystemExit as e:
logging.basicConfig() logging.basicConfig()

View file

@ -16,16 +16,16 @@
"""Utilities for working with threads and ``Futures``. """Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in ``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package (this package has also Python 3.2 in the `concurrent.futures` package. This package defines
been backported to older versions of Python and can be installed with a mostly-compatible `Future` class designed for use from coroutines,
``pip install futures``). Tornado will use `concurrent.futures.Future` if as well as some utility functions for interacting with the
it is available; otherwise it will use a compatible class defined in this `concurrent.futures` package.
module.
""" """
from __future__ import absolute_import, division, print_function, with_statement from __future__ import absolute_import, division, print_function, with_statement
import functools import functools
import platform import platform
import textwrap
import traceback import traceback
import sys import sys
@ -170,6 +170,14 @@ class Future(object):
self._callbacks = [] self._callbacks = []
# Implement the Python 3.5 Awaitable protocol if possible
# (we can't use return and yield together until py33).
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
def __await__(self):
return (yield self)
"""))
def cancel(self): def cancel(self):
"""Cancel the operation, if possible. """Cancel the operation, if possible.

View file

@ -80,8 +80,8 @@ import collections
import functools import functools
import itertools import itertools
import sys import sys
import textwrap
import types import types
import weakref
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop from tornado.ioloop import IOLoop
@ -98,6 +98,22 @@ except ImportError as e:
singledispatch = None singledispatch = None
try:
from collections.abc import Generator as GeneratorType # py35+
except ImportError:
from types import GeneratorType
try:
from inspect import isawaitable # py35+
except ImportError:
def isawaitable(x): return False
try:
import builtins # py3
except ImportError:
import __builtin__ as builtins
class KeyReuseError(Exception): class KeyReuseError(Exception):
pass pass
@ -202,6 +218,10 @@ def _make_coroutine_wrapper(func, replace_callback):
argument, so we cannot simply implement ``@engine`` in terms of argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``. ``@coroutine``.
""" """
# On Python 3.5, set the coroutine flag on our generator, to allow it
# to be used with 'await'.
if hasattr(types, 'coroutine'):
func = types.coroutine(func)
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
future = TracebackFuture() future = TracebackFuture()
@ -219,7 +239,7 @@ def _make_coroutine_wrapper(func, replace_callback):
future.set_exc_info(sys.exc_info()) future.set_exc_info(sys.exc_info())
return future return future
else: else:
if isinstance(result, types.GeneratorType): if isinstance(result, GeneratorType):
# Inline the first iteration of Runner.run. This lets us # Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine # avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to # never actually yields, which in turn allows us to
@ -318,7 +338,22 @@ class WaitIterator(object):
arguments were used in the construction of the `WaitIterator`, arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword). ``current_index`` will use the corresponding keyword).
On Python 3.5, `WaitIterator` implements the async iterator
protocol, so it can be used with the ``async for`` statement (note
that in this version the entire iteration is aborted if any value
raises an exception, while the previous example can continue past
individual errors)::
async for result in gen.WaitIterator(future1, future2):
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
.. versionadded:: 4.1 .. versionadded:: 4.1
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
""" """
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
if args and kwargs: if args and kwargs:
@ -375,6 +410,16 @@ class WaitIterator(object):
self.current_future = done self.current_future = done
self.current_index = self._unfinished.pop(done) self.current_index = self._unfinished.pop(done)
@coroutine
def __aiter__(self):
raise Return(self)
def __anext__(self):
if self.done():
# Lookup by name to silence pyflakes on older versions.
raise getattr(builtins, 'StopAsyncIteration')()
return self.next()
class YieldPoint(object): class YieldPoint(object):
"""Base class for objects that may be yielded from the generator. """Base class for objects that may be yielded from the generator.
@ -609,11 +654,12 @@ class Multi(YieldPoint):
def multi_future(children, quiet_exceptions=()): def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel. """Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns Takes a list of ``Futures`` or other yieldable objects (with the
a new Future that resolves when all the other Futures are done. exception of the legacy `.YieldPoint` interfaces) and returns a
If all the ``Futures`` succeeded, the returned Future's result is a list new Future that resolves when all the other Futures are done. If
of their results. If any failed, the returned Future raises the exception all the ``Futures`` succeeded, the returned Future's result is a
of the first one to fail. list of their results. If any failed, the returned Future raises
the exception of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same Futures, in which case a parallel dictionary is returned mapping the same
@ -634,12 +680,16 @@ def multi_future(children, quiet_exceptions=()):
If multiple ``Futures`` fail, any exceptions after the first (which is If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions`` raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types. argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Added support for other yieldable objects.
""" """
if isinstance(children, dict): if isinstance(children, dict):
keys = list(children.keys()) keys = list(children.keys())
children = children.values() children = children.values()
else: else:
keys = None keys = None
children = list(map(convert_yielded, children))
assert all(is_future(i) for i in children) assert all(is_future(i) for i in children)
unfinished_children = set(children) unfinished_children = set(children)
@ -1001,6 +1051,16 @@ def _argument_adapter(callback):
callback(None) callback(None)
return wrapper return wrapper
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
@coroutine
def _wrap_awaitable(x):
return (yield from x)
"""))
else:
def _wrap_awaitable(x):
raise NotImplementedError()
def convert_yielded(yielded): def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`. """Convert a yielded object into a `.Future`.
@ -1022,6 +1082,8 @@ def convert_yielded(yielded):
return multi_future(yielded) return multi_future(yielded)
elif is_future(yielded): elif is_future(yielded):
return yielded return yielded
elif isawaitable(yielded):
return _wrap_awaitable(yielded)
else: else:
raise BadYieldError("yielded unknown object %r" % (yielded,)) raise BadYieldError("yielded unknown object %r" % (yielded,))

View file

@ -188,7 +188,6 @@ class HTTPServer(TCPServer, Configurable,
class _HTTPRequestContext(object): class _HTTPRequestContext(object):
def __init__(self, stream, address, protocol): def __init__(self, stream, address, protocol):
self.address = address self.address = address
self.protocol = protocol
# Save the socket's address family now so we know how to # Save the socket's address family now so we know how to
# interpret self.address even after the stream is closed # interpret self.address even after the stream is closed
# and its socket attribute replaced with None. # and its socket attribute replaced with None.

View file

@ -242,6 +242,15 @@ class HTTPHeaders(dict):
# effectively a deep copy. # effectively a deep copy.
return self.copy() return self.copy()
def __reduce_ex__(self, v):
# We must override dict.__reduce_ex__ to pickle ourselves
# correctly.
return HTTPHeaders, (), list(self.get_all())
def __setstate__(self, state):
for k, v in state:
self.add(k, v)
class HTTPServerRequest(object): class HTTPServerRequest(object):
"""A single HTTP request. """A single HTTP request.

View file

@ -249,7 +249,7 @@ class IOLoop(Configurable):
if IOLoop.current(instance=False) is None: if IOLoop.current(instance=False) is None:
self.make_current() self.make_current()
elif make_current: elif make_current:
if IOLoop.current(instance=False) is None: if IOLoop.current(instance=False) is not None:
raise RuntimeError("current IOLoop already exists") raise RuntimeError("current IOLoop already exists")
self.make_current() self.make_current()

View file

@ -89,8 +89,16 @@ class StreamClosedError(IOError):
Note that the close callback is scheduled to run *after* other Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed), callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback. so you may see this error before you see the close callback.
The ``real_error`` attribute contains the underlying error that caused
the stream to close (if any).
.. versionchanged:: 4.3
Added the ``real_error`` attribute.
""" """
pass def __init__(self, real_error=None):
super(StreamClosedError, self).__init__('Stream is closed')
self.real_error = real_error
class UnsatisfiableReadError(Exception): class UnsatisfiableReadError(Exception):
@ -344,7 +352,8 @@ class BaseIOStream(object):
try: try:
self._try_inline_read() self._try_inline_read()
except: except:
future.add_done_callback(lambda f: f.exception()) if future is not None:
future.add_done_callback(lambda f: f.exception())
raise raise
return future return future
@ -446,13 +455,7 @@ class BaseIOStream(object):
futures.append(self._ssl_connect_future) futures.append(self._ssl_connect_future)
self._ssl_connect_future = None self._ssl_connect_future = None
for future in futures: for future in futures:
if self._is_connreset(self.error): future.set_exception(StreamClosedError(real_error=self.error))
# Treat connection resets as closed connections so
# clients only have to catch one kind of exception
# to avoid logging.
future.set_exception(StreamClosedError())
else:
future.set_exception(self.error or StreamClosedError())
if self._close_callback is not None: if self._close_callback is not None:
cb = self._close_callback cb = self._close_callback
self._close_callback = None self._close_callback = None
@ -646,7 +649,7 @@ class BaseIOStream(object):
raise raise
except Exception as e: except Exception as e:
if 1 != e.errno: if 1 != e.errno:
gen_log.warning("error on read", exc_info=True) gen_log.warning("error on read: %s" % e)
self.close(exc_info=True) self.close(exc_info=True)
return return
if pos is not None: if pos is not None:
@ -876,7 +879,7 @@ class BaseIOStream(object):
def _check_closed(self): def _check_closed(self):
if self.closed(): if self.closed():
raise StreamClosedError("Stream is closed") raise StreamClosedError(real_error=self.error)
def _maybe_add_error_listener(self): def _maybe_add_error_listener(self):
# This method is part of an optimization: to detect a connection that # This method is part of an optimization: to detect a connection that
@ -1149,6 +1152,15 @@ class IOStream(BaseIOStream):
def close_callback(): def close_callback():
if not future.done(): if not future.done():
# Note that unlike most Futures returned by IOStream,
# this one passes the underlying error through directly
# instead of wrapping everything in a StreamClosedError
# with a real_error attribute. This is because once the
# connection is established it's more helpful to raise
# the SSLError directly than to hide it behind a
# StreamClosedError (and the client is expecting SSL
# issues rather than network issues since this method is
# named start_tls).
future.set_exception(ssl_stream.error or StreamClosedError()) future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None: if orig_close_callback is not None:
orig_close_callback() orig_close_callback()
@ -1312,8 +1324,8 @@ class SSLIOStream(IOStream):
return False return False
try: try:
ssl_match_hostname(peercert, self._server_hostname) ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError: except SSLCertificateError as e:
gen_log.warning("Invalid SSL certificate", exc_info=True) gen_log.warning("Invalid SSL certificate: %s" % e)
return False return False
else: else:
return True return True

View file

@ -41,8 +41,10 @@ the `Locale.translate` method will simply return the original string.
from __future__ import absolute_import, division, print_function, with_statement from __future__ import absolute_import, division, print_function, with_statement
import codecs
import csv import csv
import datetime import datetime
from io import BytesIO
import numbers import numbers
import os import os
import re import re
@ -51,13 +53,14 @@ from tornado import escape
from tornado.log import gen_log from tornado.log import gen_log
from tornado.util import u from tornado.util import u
from tornado._locale_data import LOCALE_NAMES
_default_locale = "en_US" _default_locale = "en_US"
_translations = {} _translations = {}
_supported_locales = frozenset([_default_locale]) _supported_locales = frozenset([_default_locale])
_use_gettext = False _use_gettext = False
CONTEXT_SEPARATOR = "\x04" CONTEXT_SEPARATOR = "\x04"
def get(*locale_codes): def get(*locale_codes):
"""Returns the closest match for the given locale codes. """Returns the closest match for the given locale codes.
@ -86,7 +89,7 @@ def set_default_locale(code):
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
def load_translations(directory): def load_translations(directory, encoding=None):
"""Loads translations from CSV files in a directory. """Loads translations from CSV files in a directory.
Translations are strings with optional Python-style named placeholders Translations are strings with optional Python-style named placeholders
@ -106,12 +109,20 @@ def load_translations(directory):
The file is read using the `csv` module in the default "excel" dialect. The file is read using the `csv` module in the default "excel" dialect.
In this format there should not be spaces after the commas. In this format there should not be spaces after the commas.
If no ``encoding`` parameter is given, the encoding will be
detected automatically (among UTF-8 and UTF-16) if the file
contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM
is present.
Example translation ``es_LA.csv``:: Example translation ``es_LA.csv``::
"I love you","Te amo" "I love you","Te amo"
"%(name)s liked this","A %(name)s les gustó esto","plural" "%(name)s liked this","A %(name)s les gustó esto","plural"
"%(name)s liked this","A %(name)s le gustó esto","singular" "%(name)s liked this","A %(name)s le gustó esto","singular"
.. versionchanged:: 4.3
Added ``encoding`` parameter. Added support for BOM-based encoding
detection, UTF-16, and UTF-8-with-BOM.
""" """
global _translations global _translations
global _supported_locales global _supported_locales
@ -125,13 +136,29 @@ def load_translations(directory):
os.path.join(directory, path)) os.path.join(directory, path))
continue continue
full_path = os.path.join(directory, path) full_path = os.path.join(directory, path)
if encoding is None:
# Try to autodetect encoding based on the BOM.
with open(full_path, 'rb') as f:
data = f.read(len(codecs.BOM_UTF16_LE))
if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
encoding = 'utf-16'
else:
# utf-8-sig is "utf-8 with optional BOM". It's discouraged
# in most cases but is common with CSV files because Excel
# cannot read utf-8 files without a BOM.
encoding = 'utf-8-sig'
try: try:
# python 3: csv.reader requires a file open in text mode. # python 3: csv.reader requires a file open in text mode.
# Force utf8 to avoid dependence on $LANG environment variable. # Force utf8 to avoid dependence on $LANG environment variable.
f = open(full_path, "r", encoding="utf-8") f = open(full_path, "r", encoding=encoding)
except TypeError: except TypeError:
# python 2: files return byte strings, which are decoded below. # python 2: csv can only handle byte strings (in ascii-compatible
f = open(full_path, "r") # encodings), which we decode below. Transcode everything into
# utf8 before passing it to csv.reader.
f = BytesIO()
with codecs.open(full_path, "r", encoding=encoding) as infile:
f.write(escape.utf8(infile.read()))
f.seek(0)
_translations[locale] = {} _translations[locale] = {}
for i, row in enumerate(csv.reader(f)): for i, row in enumerate(csv.reader(f)):
if not row or len(row) < 2: if not row or len(row) < 2:
@ -491,68 +518,3 @@ class GettextLocale(Locale):
# Translation not found # Translation not found
result = message result = message
return result return result
LOCALE_NAMES = {
"af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
"am_ET": {"name_en": u("Amharic"), "name": u('\u12a0\u121b\u122d\u129b')},
"ar_AR": {"name_en": u("Arabic"), "name": u("\u0627\u0644\u0639\u0631\u0628\u064a\u0629")},
"bg_BG": {"name_en": u("Bulgarian"), "name": u("\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438")},
"bn_IN": {"name_en": u("Bengali"), "name": u("\u09ac\u09be\u0982\u09b2\u09be")},
"bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
"ca_ES": {"name_en": u("Catalan"), "name": u("Catal\xe0")},
"cs_CZ": {"name_en": u("Czech"), "name": u("\u010ce\u0161tina")},
"cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
"da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
"de_DE": {"name_en": u("German"), "name": u("Deutsch")},
"el_GR": {"name_en": u("Greek"), "name": u("\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac")},
"en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
"en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
"es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Espa\xf1ol (Espa\xf1a)")},
"es_LA": {"name_en": u("Spanish"), "name": u("Espa\xf1ol")},
"et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
"eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
"fa_IR": {"name_en": u("Persian"), "name": u("\u0641\u0627\u0631\u0633\u06cc")},
"fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
"fr_CA": {"name_en": u("French (Canada)"), "name": u("Fran\xe7ais (Canada)")},
"fr_FR": {"name_en": u("French"), "name": u("Fran\xe7ais")},
"ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
"gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
"he_IL": {"name_en": u("Hebrew"), "name": u("\u05e2\u05d1\u05e8\u05d9\u05ea")},
"hi_IN": {"name_en": u("Hindi"), "name": u("\u0939\u093f\u0928\u094d\u0926\u0940")},
"hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
"hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
"id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
"is_IS": {"name_en": u("Icelandic"), "name": u("\xcdslenska")},
"it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
"ja_JP": {"name_en": u("Japanese"), "name": u("\u65e5\u672c\u8a9e")},
"ko_KR": {"name_en": u("Korean"), "name": u("\ud55c\uad6d\uc5b4")},
"lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvi\u0173")},
"lv_LV": {"name_en": u("Latvian"), "name": u("Latvie\u0161u")},
"mk_MK": {"name_en": u("Macedonian"), "name": u("\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438")},
"ml_IN": {"name_en": u("Malayalam"), "name": u("\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02")},
"ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
"nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokm\xe5l)")},
"nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
"nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
"pa_IN": {"name_en": u("Punjabi"), "name": u("\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40")},
"pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
"pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Portugu\xeas (Brasil)")},
"pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Portugu\xeas (Portugal)")},
"ro_RO": {"name_en": u("Romanian"), "name": u("Rom\xe2n\u0103")},
"ru_RU": {"name_en": u("Russian"), "name": u("\u0420\u0443\u0441\u0441\u043a\u0438\u0439")},
"sk_SK": {"name_en": u("Slovak"), "name": u("Sloven\u010dina")},
"sl_SI": {"name_en": u("Slovenian"), "name": u("Sloven\u0161\u010dina")},
"sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
"sr_RS": {"name_en": u("Serbian"), "name": u("\u0421\u0440\u043f\u0441\u043a\u0438")},
"sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
"sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
"ta_IN": {"name_en": u("Tamil"), "name": u("\u0ba4\u0bae\u0bbf\u0bb4\u0bcd")},
"te_IN": {"name_en": u("Telugu"), "name": u("\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41")},
"th_TH": {"name_en": u("Thai"), "name": u("\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22")},
"tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
"tr_TR": {"name_en": u("Turkish"), "name": u("T\xfcrk\xe7e")},
"uk_UA": {"name_en": u("Ukraini "), "name": u("\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")},
"vi_VN": {"name_en": u("Vietnamese"), "name": u("Ti\u1ebfng Vi\u1ec7t")},
"zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("\u4e2d\u6587(\u7b80\u4f53)")},
"zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("\u4e2d\u6587(\u7e41\u9ad4)")},
}

View file

@ -12,13 +12,6 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
"""
.. testsetup:: *
from tornado import ioloop, gen, locks
io_loop = ioloop.IOLoop.current()
"""
from __future__ import absolute_import, division, print_function, with_statement from __future__ import absolute_import, division, print_function, with_statement
__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock'] __all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock']
@ -61,7 +54,11 @@ class Condition(_TimeoutGarbageCollector):
.. testcode:: .. testcode::
condition = locks.Condition() from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Condition
condition = Condition()
@gen.coroutine @gen.coroutine
def waiter(): def waiter():
@ -80,7 +77,7 @@ class Condition(_TimeoutGarbageCollector):
# Yield two Futures; wait for waiter() and notifier() to finish. # Yield two Futures; wait for waiter() and notifier() to finish.
yield [waiter(), notifier()] yield [waiter(), notifier()]
io_loop.run_sync(runner) IOLoop.current().run_sync(runner)
.. testoutput:: .. testoutput::
@ -92,7 +89,7 @@ class Condition(_TimeoutGarbageCollector):
`wait` takes an optional ``timeout`` argument, which is either an absolute `wait` takes an optional ``timeout`` argument, which is either an absolute
timestamp:: timestamp::
io_loop = ioloop.IOLoop.current() io_loop = IOLoop.current()
# Wait up to 1 second for a notification. # Wait up to 1 second for a notification.
yield condition.wait(timeout=io_loop.time() + 1) yield condition.wait(timeout=io_loop.time() + 1)
@ -161,7 +158,11 @@ class Event(object):
.. testcode:: .. testcode::
event = locks.Event() from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Event
event = Event()
@gen.coroutine @gen.coroutine
def waiter(): def waiter():
@ -180,7 +181,7 @@ class Event(object):
def runner(): def runner():
yield [waiter(), setter()] yield [waiter(), setter()]
io_loop.run_sync(runner) IOLoop.current().run_sync(runner)
.. testoutput:: .. testoutput::
@ -210,7 +211,7 @@ class Event(object):
def clear(self): def clear(self):
"""Reset the internal flag to ``False``. """Reset the internal flag to ``False``.
Calls to `.wait` will block until `.set` is called. Calls to `.wait` will block until `.set` is called.
""" """
if self._future.done(): if self._future.done():
@ -261,7 +262,8 @@ class Semaphore(_TimeoutGarbageCollector):
from collections import deque from collections import deque
from tornado import gen, ioloop from tornado import gen
from tornado.ioloop import IOLoop
from tornado.concurrent import Future from tornado.concurrent import Future
# Ensure reliable doctest output: resolve Futures one at a time. # Ensure reliable doctest output: resolve Futures one at a time.
@ -273,14 +275,18 @@ class Semaphore(_TimeoutGarbageCollector):
yield gen.moment yield gen.moment
f.set_result(None) f.set_result(None)
ioloop.IOLoop.current().add_callback(simulator, list(futures_q)) IOLoop.current().add_callback(simulator, list(futures_q))
def use_some_resource(): def use_some_resource():
return futures_q.popleft() return futures_q.popleft()
.. testcode:: semaphore .. testcode:: semaphore
sem = locks.Semaphore(2) from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Semaphore
sem = Semaphore(2)
@gen.coroutine @gen.coroutine
def worker(worker_id): def worker(worker_id):
@ -297,7 +303,7 @@ class Semaphore(_TimeoutGarbageCollector):
# Join all workers. # Join all workers.
yield [worker(i) for i in range(3)] yield [worker(i) for i in range(3)]
io_loop.run_sync(runner) IOLoop.current().run_sync(runner)
.. testoutput:: semaphore .. testoutput:: semaphore
@ -321,6 +327,20 @@ class Semaphore(_TimeoutGarbageCollector):
# Now the semaphore has been released. # Now the semaphore has been released.
print("Worker %d is done" % worker_id) print("Worker %d is done" % worker_id)
In Python 3.5, the semaphore itself can be used as an async context
manager::
async def worker(worker_id):
async with sem:
print("Worker %d is working" % worker_id)
await use_some_resource()
# Now the semaphore has been released.
print("Worker %d is done" % worker_id)
.. versionchanged:: 4.3
Added ``async with`` support in Python 3.5.
""" """
def __init__(self, value=1): def __init__(self, value=1):
super(Semaphore, self).__init__() super(Semaphore, self).__init__()
@ -383,6 +403,14 @@ class Semaphore(_TimeoutGarbageCollector):
__exit__ = __enter__ __exit__ = __enter__
@gen.coroutine
def __aenter__(self):
yield self.acquire()
@gen.coroutine
def __aexit__(self, typ, value, tb):
self.release()
class BoundedSemaphore(Semaphore): class BoundedSemaphore(Semaphore):
"""A semaphore that prevents release() being called too many times. """A semaphore that prevents release() being called too many times.
@ -412,7 +440,7 @@ class Lock(object):
Releasing an unlocked lock raises `RuntimeError`. Releasing an unlocked lock raises `RuntimeError`.
`acquire` supports the context manager protocol: `acquire` supports the context manager protocol in all Python versions:
>>> from tornado import gen, locks >>> from tornado import gen, locks
>>> lock = locks.Lock() >>> lock = locks.Lock()
@ -424,6 +452,22 @@ class Lock(object):
... pass ... pass
... ...
... # Now the lock is released. ... # Now the lock is released.
In Python 3.5, `Lock` also supports the async context manager
protocol. Note that in this case there is no `acquire`, because
``async with`` includes both the ``yield`` and the ``acquire``
(just as it does with `threading.Lock`):
>>> async def f(): # doctest: +SKIP
... async with lock:
... # Do something holding the lock.
... pass
...
... # Now the lock is released.
.. versionchanged:: 3.5
Added ``async with`` support in Python 3.5.
""" """
def __init__(self): def __init__(self):
self._block = BoundedSemaphore(value=1) self._block = BoundedSemaphore(value=1)
@ -458,3 +502,11 @@ class Lock(object):
"Use Lock like 'with (yield lock)', not like 'with lock'") "Use Lock like 'with (yield lock)', not like 'with lock'")
__exit__ = __enter__ __exit__ = __enter__
@gen.coroutine
def __aenter__(self):
yield self.acquire()
@gen.coroutine
def __aexit__(self, typ, value, tb):
self.release()

View file

@ -68,6 +68,12 @@ instances to define isolated sets of options, such as for subcommands.
from tornado.options import options, parse_command_line from tornado.options import options, parse_command_line
options.logging = None options.logging = None
parse_command_line() parse_command_line()
.. versionchanged:: 4.3
Dashes and underscores are fully interchangeable in option names;
options can be defined, set, and read with any mix of the two.
Dashes are typical for command-line usage while config files require
underscores.
""" """
from __future__ import absolute_import, division, print_function, with_statement from __future__ import absolute_import, division, print_function, with_statement
@ -103,28 +109,38 @@ class OptionParser(object):
self.define("help", type=bool, help="show this help information", self.define("help", type=bool, help="show this help information",
callback=self._help_callback) callback=self._help_callback)
def _normalize_name(self, name):
return name.replace('_', '-')
def __getattr__(self, name): def __getattr__(self, name):
name = self._normalize_name(name)
if isinstance(self._options.get(name), _Option): if isinstance(self._options.get(name), _Option):
return self._options[name].value() return self._options[name].value()
raise AttributeError("Unrecognized option %r" % name) raise AttributeError("Unrecognized option %r" % name)
def __setattr__(self, name, value): def __setattr__(self, name, value):
name = self._normalize_name(name)
if isinstance(self._options.get(name), _Option): if isinstance(self._options.get(name), _Option):
return self._options[name].set(value) return self._options[name].set(value)
raise AttributeError("Unrecognized option %r" % name) raise AttributeError("Unrecognized option %r" % name)
def __iter__(self): def __iter__(self):
return iter(self._options) return (opt.name for opt in self._options.values())
def __getitem__(self, item): def __contains__(self, name):
return self._options[item].value() name = self._normalize_name(name)
return name in self._options
def __getitem__(self, name):
name = self._normalize_name(name)
return self._options[name].value()
def items(self): def items(self):
"""A sequence of (name, value) pairs. """A sequence of (name, value) pairs.
.. versionadded:: 3.1 .. versionadded:: 3.1
""" """
return [(name, opt.value()) for name, opt in self._options.items()] return [(opt.name, opt.value()) for name, opt in self._options.items()]
def groups(self): def groups(self):
"""The set of option-groups created by ``define``. """The set of option-groups created by ``define``.
@ -151,7 +167,7 @@ class OptionParser(object):
.. versionadded:: 3.1 .. versionadded:: 3.1
""" """
return dict( return dict(
(name, opt.value()) for name, opt in self._options.items() (opt.name, opt.value()) for name, opt in self._options.items()
if not group or group == opt.group_name) if not group or group == opt.group_name)
def as_dict(self): def as_dict(self):
@ -160,7 +176,7 @@ class OptionParser(object):
.. versionadded:: 3.1 .. versionadded:: 3.1
""" """
return dict( return dict(
(name, opt.value()) for name, opt in self._options.items()) (opt.name, opt.value()) for name, opt in self._options.items())
def define(self, name, default=None, type=None, help=None, metavar=None, def define(self, name, default=None, type=None, help=None, metavar=None,
multiple=False, group=None, callback=None): multiple=False, group=None, callback=None):
@ -223,11 +239,13 @@ class OptionParser(object):
group_name = group group_name = group
else: else:
group_name = file_name group_name = file_name
self._options[name] = _Option(name, file_name=file_name, normalized = self._normalize_name(name)
default=default, type=type, help=help, option = _Option(name, file_name=file_name,
metavar=metavar, multiple=multiple, default=default, type=type, help=help,
group_name=group_name, metavar=metavar, multiple=multiple,
callback=callback) group_name=group_name,
callback=callback)
self._options[normalized] = option
def parse_command_line(self, args=None, final=True): def parse_command_line(self, args=None, final=True):
"""Parses all options given on the command line (defaults to """Parses all options given on the command line (defaults to
@ -255,7 +273,7 @@ class OptionParser(object):
break break
arg = args[i].lstrip("-") arg = args[i].lstrip("-")
name, equals, value = arg.partition("=") name, equals, value = arg.partition("=")
name = name.replace('-', '_') name = self._normalize_name(name)
if name not in self._options: if name not in self._options:
self.print_help() self.print_help()
raise Error('Unrecognized command line option: %r' % name) raise Error('Unrecognized command line option: %r' % name)
@ -287,8 +305,9 @@ class OptionParser(object):
with open(path, 'rb') as f: with open(path, 'rb') as f:
exec_in(native_str(f.read()), config, config) exec_in(native_str(f.read()), config, config)
for name in config: for name in config:
if name in self._options: normalized = self._normalize_name(name)
self._options[name].set(config[name]) if normalized in self._options:
self._options[normalized].set(config[name])
if final: if final:
self.run_parse_callbacks() self.run_parse_callbacks()
@ -308,7 +327,8 @@ class OptionParser(object):
print("\n%s options:\n" % os.path.normpath(filename), file=file) print("\n%s options:\n" % os.path.normpath(filename), file=file)
o.sort(key=lambda option: option.name) o.sort(key=lambda option: option.name)
for option in o: for option in o:
prefix = option.name # Always print names with dashes in a CLI context.
prefix = self._normalize_name(option.name)
if option.metavar: if option.metavar:
prefix += "=" + option.metavar prefix += "=" + option.metavar
description = option.help or "" description = option.help or ""

View file

@ -35,7 +35,6 @@ class BaseAsyncIOLoop(IOLoop):
super(BaseAsyncIOLoop, self).initialize(**kwargs) super(BaseAsyncIOLoop, self).initialize(**kwargs)
self.asyncio_loop = asyncio_loop self.asyncio_loop = asyncio_loop
self.close_loop = close_loop self.close_loop = close_loop
self.asyncio_loop.call_soon(self.make_current)
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {} self.handlers = {}
# Set of fds listening for reads/writes # Set of fds listening for reads/writes
@ -105,8 +104,16 @@ class BaseAsyncIOLoop(IOLoop):
handler_func(fileobj, events) handler_func(fileobj, events)
def start(self): def start(self):
self._setup_logging() old_current = IOLoop.current(instance=False)
self.asyncio_loop.run_forever() try:
self._setup_logging()
self.make_current()
self.asyncio_loop.run_forever()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self): def stop(self):
self.asyncio_loop.stop() self.asyncio_loop.stop()
@ -140,8 +147,14 @@ class AsyncIOMainLoop(BaseAsyncIOLoop):
class AsyncIOLoop(BaseAsyncIOLoop): class AsyncIOLoop(BaseAsyncIOLoop):
def initialize(self, **kwargs): def initialize(self, **kwargs):
super(AsyncIOLoop, self).initialize(asyncio.new_event_loop(), loop = asyncio.new_event_loop()
close_loop=True, **kwargs) try:
super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs)
except Exception:
# If initialize() does not succeed (taking ownership of the loop),
# we have to close it.
loop.close()
raise
def to_tornado_future(asyncio_future): def to_tornado_future(asyncio_future):

View file

@ -423,7 +423,6 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
reactor = twisted.internet.reactor reactor = twisted.internet.reactor
self.reactor = reactor self.reactor = reactor
self.fds = {} self.fds = {}
self.reactor.callWhenRunning(self.make_current)
def close(self, all_fds=False): def close(self, all_fds=False):
fds = self.fds fds = self.fds
@ -477,8 +476,16 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
del self.fds[fd] del self.fds[fd]
def start(self): def start(self):
self._setup_logging() old_current = IOLoop.current(instance=False)
self.reactor.run() try:
self._setup_logging()
self.make_current()
self.reactor.run()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self): def stop(self):
self.reactor.crash() self.reactor.crash()

View file

@ -44,6 +44,14 @@ def _set_timeout(future, timeout):
lambda _: io_loop.remove_timeout(timeout_handle)) lambda _: io_loop.remove_timeout(timeout_handle))
class _QueueIterator(object):
def __init__(self, q):
self.q = q
def __anext__(self):
return self.q.get()
class Queue(object): class Queue(object):
"""Coordinate producer and consumer coroutines. """Coordinate producer and consumer coroutines.
@ -51,7 +59,11 @@ class Queue(object):
.. testcode:: .. testcode::
q = queues.Queue(maxsize=2) from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue
q = Queue(maxsize=2)
@gen.coroutine @gen.coroutine
def consumer(): def consumer():
@ -71,19 +83,20 @@ class Queue(object):
@gen.coroutine @gen.coroutine
def main(): def main():
consumer() # Start consumer. # Start consumer without waiting (since it never finishes).
IOLoop.current().spawn_callback(consumer)
yield producer() # Wait for producer to put all tasks. yield producer() # Wait for producer to put all tasks.
yield q.join() # Wait for consumer to finish all tasks. yield q.join() # Wait for consumer to finish all tasks.
print('Done') print('Done')
io_loop.run_sync(main) IOLoop.current().run_sync(main)
.. testoutput:: .. testoutput::
Put 0 Put 0
Put 1 Put 1
Put 2
Doing work on 0 Doing work on 0
Put 2
Doing work on 1 Doing work on 1
Put 3 Put 3
Doing work on 2 Doing work on 2
@ -91,6 +104,21 @@ class Queue(object):
Doing work on 3 Doing work on 3
Doing work on 4 Doing work on 4
Done Done
In Python 3.5, `Queue` implements the async iterator protocol, so
``consumer()`` could be rewritten as::
async def consumer():
async for item in q:
try:
print('Doing work on %s' % item)
yield gen.sleep(0.01)
finally:
q.task_done()
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
""" """
def __init__(self, maxsize=0): def __init__(self, maxsize=0):
if maxsize is None: if maxsize is None:
@ -215,6 +243,10 @@ class Queue(object):
""" """
return self._finished.wait(timeout) return self._finished.wait(timeout)
@gen.coroutine
def __aiter__(self):
return _QueueIterator(self)
# These three are overridable in subclasses. # These three are overridable in subclasses.
def _init(self): def _init(self):
self._queue = collections.deque() self._queue = collections.deque()
@ -266,7 +298,9 @@ class PriorityQueue(Queue):
.. testcode:: .. testcode::
q = queues.PriorityQueue() from tornado.queues import PriorityQueue
q = PriorityQueue()
q.put((1, 'medium-priority item')) q.put((1, 'medium-priority item'))
q.put((0, 'high-priority item')) q.put((0, 'high-priority item'))
q.put((10, 'low-priority item')) q.put((10, 'low-priority item'))
@ -296,7 +330,9 @@ class LifoQueue(Queue):
.. testcode:: .. testcode::
q = queues.LifoQueue() from tornado.queues import LifoQueue
q = LifoQueue()
q.put(3) q.put(3)
q.put(2) q.put(2)
q.put(1) q.put(1)

View file

@ -427,7 +427,10 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
if self.final_callback: if self.final_callback:
self._remove_timeout() self._remove_timeout()
if isinstance(value, StreamClosedError): if isinstance(value, StreamClosedError):
value = HTTPError(599, "Stream closed") if value.real_error is None:
value = HTTPError(599, "Stream closed")
else:
value = value.real_error
self._run_callback(HTTPResponse(self.request, 599, error=value, self._run_callback(HTTPResponse(self.request, 599, error=value,
request_time=self.io_loop.time() - self.start_time, request_time=self.io_loop.time() - self.start_time,
)) ))

View file

@ -186,6 +186,11 @@ with ``{# ... #}``.
``{% while *condition* %}... {% end %}`` ``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement. ``{% break %}`` and Same as the python ``while`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop. ``{% continue %}`` may be used inside the loop.
``{% whitespace *mode* %}``
Sets the whitespace mode for the remainder of the current file
(or until the next ``{% whitespace %}`` directive). See
`filter_whitespace` for available options. New in Tornado 4.3.
""" """
from __future__ import absolute_import, division, print_function, with_statement from __future__ import absolute_import, division, print_function, with_statement
@ -210,6 +215,31 @@ _DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object() _UNSET = object()
def filter_whitespace(mode, text):
"""Transform whitespace in ``text`` according to ``mode``.
Available modes are:
* ``all``: Return all whitespace unmodified.
* ``single``: Collapse consecutive whitespace with a single whitespace
character, preserving newlines.
* ``oneline``: Collapse all runs of whitespace into a single space
character, removing all newlines in the process.
.. versionadded:: 4.3
"""
if mode == 'all':
return text
elif mode == 'single':
text = re.sub(r"([\t ]+)", " ", text)
text = re.sub(r"(\s*\n\s*)", "\n", text)
return text
elif mode == 'oneline':
return re.sub(r"(\s+)", " ", text)
else:
raise Exception("invalid whitespace mode %s" % mode)
class Template(object): class Template(object):
"""A compiled template. """A compiled template.
@ -220,21 +250,58 @@ class Template(object):
# autodoc because _UNSET looks like garbage. When changing # autodoc because _UNSET looks like garbage. When changing
# this signature update website/sphinx/template.rst too. # this signature update website/sphinx/template.rst too.
def __init__(self, template_string, name="<string>", loader=None, def __init__(self, template_string, name="<string>", loader=None,
compress_whitespace=None, autoescape=_UNSET): compress_whitespace=_UNSET, autoescape=_UNSET,
whitespace=None):
"""Construct a Template.
:arg str template_string: the contents of the template file.
:arg str name: the filename from which the template was loaded
(used for error message).
:arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template,
used to resolve ``{% include %}`` and ``{% extend %}``
directives.
:arg bool compress_whitespace: Deprecated since Tornado 4.3.
Equivalent to ``whitespace="single"`` if true and
``whitespace="all"`` if false.
:arg str autoescape: The name of a function in the template
namespace, or ``None`` to disable escaping by default.
:arg str whitespace: A string specifying treatment of whitespace;
see `filter_whitespace` for options.
.. versionchanged:: 4.3
Added ``whitespace`` parameter; deprecated ``compress_whitespace``.
"""
self.name = name self.name = name
if compress_whitespace is None:
compress_whitespace = name.endswith(".html") or \ if compress_whitespace is not _UNSET:
name.endswith(".js") # Convert deprecated compress_whitespace (bool) to whitespace (str).
if whitespace is not None:
raise Exception("cannot set both whitespace and compress_whitespace")
whitespace = "single" if compress_whitespace else "all"
if whitespace is None:
if loader and loader.whitespace:
whitespace = loader.whitespace
else:
# Whitespace defaults by filename.
if name.endswith(".html") or name.endswith(".js"):
whitespace = "single"
else:
whitespace = "all"
# Validate the whitespace setting.
filter_whitespace(whitespace, '')
if autoescape is not _UNSET: if autoescape is not _UNSET:
self.autoescape = autoescape self.autoescape = autoescape
elif loader: elif loader:
self.autoescape = loader.autoescape self.autoescape = loader.autoescape
else: else:
self.autoescape = _DEFAULT_AUTOESCAPE self.autoescape = _DEFAULT_AUTOESCAPE
self.namespace = loader.namespace if loader else {} self.namespace = loader.namespace if loader else {}
reader = _TemplateReader(name, escape.native_str(template_string)) reader = _TemplateReader(name, escape.native_str(template_string),
whitespace)
self.file = _File(self, _parse(reader, self)) self.file = _File(self, _parse(reader, self))
self.code = self._generate_python(loader, compress_whitespace) self.code = self._generate_python(loader)
self.loader = loader self.loader = loader
try: try:
# Under python2.5, the fake filename used here must match # Under python2.5, the fake filename used here must match
@ -277,7 +344,7 @@ class Template(object):
linecache.clearcache() linecache.clearcache()
return execute() return execute()
def _generate_python(self, loader, compress_whitespace): def _generate_python(self, loader):
buffer = StringIO() buffer = StringIO()
try: try:
# named_blocks maps from names to _NamedBlock objects # named_blocks maps from names to _NamedBlock objects
@ -286,8 +353,8 @@ class Template(object):
ancestors.reverse() ancestors.reverse()
for ancestor in ancestors: for ancestor in ancestors:
ancestor.find_named_blocks(loader, named_blocks) ancestor.find_named_blocks(loader, named_blocks)
writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template, writer = _CodeWriter(buffer, named_blocks, loader,
compress_whitespace) ancestors[0].template)
ancestors[0].generate(writer) ancestors[0].generate(writer)
return buffer.getvalue() return buffer.getvalue()
finally: finally:
@ -312,12 +379,26 @@ class BaseLoader(object):
``{% extends %}`` and ``{% include %}``. The loader caches all ``{% extends %}`` and ``{% include %}``. The loader caches all
templates after they are loaded the first time. templates after they are loaded the first time.
""" """
def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None): def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None,
"""``autoescape`` must be either None or a string naming a function whitespace=None):
in the template namespace, such as "xhtml_escape". """Construct a template loader.
:arg str autoescape: The name of a function in the template
namespace, such as "xhtml_escape", or ``None`` to disable
autoescaping by default.
:arg dict namespace: A dictionary to be added to the default template
namespace, or ``None``.
:arg str whitespace: A string specifying default behavior for
whitespace in templates; see `filter_whitespace` for options.
Default is "single" for files ending in ".html" and ".js" and
"all" for other files.
.. versionchanged:: 4.3
Added ``whitespace`` parameter.
""" """
self.autoescape = autoescape self.autoescape = autoescape
self.namespace = namespace or {} self.namespace = namespace or {}
self.whitespace = whitespace
self.templates = {} self.templates = {}
# self.lock protects self.templates. It's a reentrant lock # self.lock protects self.templates. It's a reentrant lock
# because templates may load other templates via `include` or # because templates may load other templates via `include` or
@ -558,37 +639,49 @@ class _Module(_Expression):
class _Text(_Node): class _Text(_Node):
def __init__(self, value, line): def __init__(self, value, line, whitespace):
self.value = value self.value = value
self.line = line self.line = line
self.whitespace = whitespace
def generate(self, writer): def generate(self, writer):
value = self.value value = self.value
# Compress lots of white space to a single character. If the whitespace # Compress whitespace if requested, with a crude heuristic to avoid
# breaks a line, have it continue to break a line, but just with a # altering preformatted whitespace.
# single \n character if "<pre>" not in value:
if writer.compress_whitespace and "<pre>" not in value: value = filter_whitespace(self.whitespace, value)
value = re.sub(r"([\t ]+)", " ", value)
value = re.sub(r"(\s*\n\s*)", "\n", value)
if value: if value:
writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line) writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
class ParseError(Exception): class ParseError(Exception):
"""Raised for template syntax errors.""" """Raised for template syntax errors.
pass
``ParseError`` instances have ``filename`` and ``lineno`` attributes
indicating the position of the error.
.. versionchanged:: 4.3
Added ``filename`` and ``lineno`` attributes.
"""
def __init__(self, message, filename, lineno):
self.message = message
# The names "filename" and "lineno" are chosen for consistency
# with python SyntaxError.
self.filename = filename
self.lineno = lineno
def __str__(self):
return '%s at %s:%d' % (self.message, self.filename, self.lineno)
class _CodeWriter(object): class _CodeWriter(object):
def __init__(self, file, named_blocks, loader, current_template, def __init__(self, file, named_blocks, loader, current_template):
compress_whitespace):
self.file = file self.file = file
self.named_blocks = named_blocks self.named_blocks = named_blocks
self.loader = loader self.loader = loader
self.current_template = current_template self.current_template = current_template
self.compress_whitespace = compress_whitespace
self.apply_counter = 0 self.apply_counter = 0
self.include_stack = [] self.include_stack = []
self._indent = 0 self._indent = 0
@ -633,9 +726,10 @@ class _CodeWriter(object):
class _TemplateReader(object): class _TemplateReader(object):
def __init__(self, name, text): def __init__(self, name, text, whitespace):
self.name = name self.name = name
self.text = text self.text = text
self.whitespace = whitespace
self.line = 1 self.line = 1
self.pos = 0 self.pos = 0
@ -687,6 +781,9 @@ class _TemplateReader(object):
def __str__(self): def __str__(self):
return self.text[self.pos:] return self.text[self.pos:]
def raise_parse_error(self, msg):
raise ParseError(msg, self.name, self.line)
def _format_code(code): def _format_code(code):
lines = code.splitlines() lines = code.splitlines()
@ -704,9 +801,10 @@ def _parse(reader, template, in_block=None, in_loop=None):
if curly == -1 or curly + 1 == reader.remaining(): if curly == -1 or curly + 1 == reader.remaining():
# EOF # EOF
if in_block: if in_block:
raise ParseError("Missing {%% end %%} block for %s" % reader.raise_parse_error(
in_block) "Missing {%% end %%} block for %s" % in_block)
body.chunks.append(_Text(reader.consume(), reader.line)) body.chunks.append(_Text(reader.consume(), reader.line,
reader.whitespace))
return body return body
# If the first curly brace is not the start of a special token, # If the first curly brace is not the start of a special token,
# start searching from the character after it # start searching from the character after it
@ -725,7 +823,8 @@ def _parse(reader, template, in_block=None, in_loop=None):
# Append any text before the special token # Append any text before the special token
if curly > 0: if curly > 0:
cons = reader.consume(curly) cons = reader.consume(curly)
body.chunks.append(_Text(cons, reader.line)) body.chunks.append(_Text(cons, reader.line,
reader.whitespace))
start_brace = reader.consume(2) start_brace = reader.consume(2)
line = reader.line line = reader.line
@ -736,14 +835,15 @@ def _parse(reader, template, in_block=None, in_loop=None):
# which also use double braces. # which also use double braces.
if reader.remaining() and reader[0] == "!": if reader.remaining() and reader[0] == "!":
reader.consume(1) reader.consume(1)
body.chunks.append(_Text(start_brace, line)) body.chunks.append(_Text(start_brace, line,
reader.whitespace))
continue continue
# Comment # Comment
if start_brace == "{#": if start_brace == "{#":
end = reader.find("#}") end = reader.find("#}")
if end == -1: if end == -1:
raise ParseError("Missing end expression #} on line %d" % line) reader.raise_parse_error("Missing end comment #}")
contents = reader.consume(end).strip() contents = reader.consume(end).strip()
reader.consume(2) reader.consume(2)
continue continue
@ -752,11 +852,11 @@ def _parse(reader, template, in_block=None, in_loop=None):
if start_brace == "{{": if start_brace == "{{":
end = reader.find("}}") end = reader.find("}}")
if end == -1: if end == -1:
raise ParseError("Missing end expression }} on line %d" % line) reader.raise_parse_error("Missing end expression }}")
contents = reader.consume(end).strip() contents = reader.consume(end).strip()
reader.consume(2) reader.consume(2)
if not contents: if not contents:
raise ParseError("Empty expression on line %d" % line) reader.raise_parse_error("Empty expression")
body.chunks.append(_Expression(contents, line)) body.chunks.append(_Expression(contents, line))
continue continue
@ -764,11 +864,11 @@ def _parse(reader, template, in_block=None, in_loop=None):
assert start_brace == "{%", start_brace assert start_brace == "{%", start_brace
end = reader.find("%}") end = reader.find("%}")
if end == -1: if end == -1:
raise ParseError("Missing end block %%} on line %d" % line) reader.raise_parse_error("Missing end block %}")
contents = reader.consume(end).strip() contents = reader.consume(end).strip()
reader.consume(2) reader.consume(2)
if not contents: if not contents:
raise ParseError("Empty block tag ({%% %%}) on line %d" % line) reader.raise_parse_error("Empty block tag ({% %})")
operator, space, suffix = contents.partition(" ") operator, space, suffix = contents.partition(" ")
suffix = suffix.strip() suffix = suffix.strip()
@ -783,40 +883,43 @@ def _parse(reader, template, in_block=None, in_loop=None):
allowed_parents = intermediate_blocks.get(operator) allowed_parents = intermediate_blocks.get(operator)
if allowed_parents is not None: if allowed_parents is not None:
if not in_block: if not in_block:
raise ParseError("%s outside %s block" % reader.raise_parse_error("%s outside %s block" %
(operator, allowed_parents)) (operator, allowed_parents))
if in_block not in allowed_parents: if in_block not in allowed_parents:
raise ParseError("%s block cannot be attached to %s block" % (operator, in_block)) reader.raise_parse_error(
"%s block cannot be attached to %s block" %
(operator, in_block))
body.chunks.append(_IntermediateControlBlock(contents, line)) body.chunks.append(_IntermediateControlBlock(contents, line))
continue continue
# End tag # End tag
elif operator == "end": elif operator == "end":
if not in_block: if not in_block:
raise ParseError("Extra {%% end %%} block on line %d" % line) reader.raise_parse_error("Extra {% end %} block")
return body return body
elif operator in ("extends", "include", "set", "import", "from", elif operator in ("extends", "include", "set", "import", "from",
"comment", "autoescape", "raw", "module"): "comment", "autoescape", "whitespace", "raw",
"module"):
if operator == "comment": if operator == "comment":
continue continue
if operator == "extends": if operator == "extends":
suffix = suffix.strip('"').strip("'") suffix = suffix.strip('"').strip("'")
if not suffix: if not suffix:
raise ParseError("extends missing file path on line %d" % line) reader.raise_parse_error("extends missing file path")
block = _ExtendsBlock(suffix) block = _ExtendsBlock(suffix)
elif operator in ("import", "from"): elif operator in ("import", "from"):
if not suffix: if not suffix:
raise ParseError("import missing statement on line %d" % line) reader.raise_parse_error("import missing statement")
block = _Statement(contents, line) block = _Statement(contents, line)
elif operator == "include": elif operator == "include":
suffix = suffix.strip('"').strip("'") suffix = suffix.strip('"').strip("'")
if not suffix: if not suffix:
raise ParseError("include missing file path on line %d" % line) reader.raise_parse_error("include missing file path")
block = _IncludeBlock(suffix, reader, line) block = _IncludeBlock(suffix, reader, line)
elif operator == "set": elif operator == "set":
if not suffix: if not suffix:
raise ParseError("set missing statement on line %d" % line) reader.raise_parse_error("set missing statement")
block = _Statement(suffix, line) block = _Statement(suffix, line)
elif operator == "autoescape": elif operator == "autoescape":
fn = suffix.strip() fn = suffix.strip()
@ -824,6 +927,12 @@ def _parse(reader, template, in_block=None, in_loop=None):
fn = None fn = None
template.autoescape = fn template.autoescape = fn
continue continue
elif operator == "whitespace":
mode = suffix.strip()
# Validate the selected mode
filter_whitespace(mode, '')
reader.whitespace = mode
continue
elif operator == "raw": elif operator == "raw":
block = _Expression(suffix, line, raw=True) block = _Expression(suffix, line, raw=True)
elif operator == "module": elif operator == "module":
@ -844,11 +953,11 @@ def _parse(reader, template, in_block=None, in_loop=None):
if operator == "apply": if operator == "apply":
if not suffix: if not suffix:
raise ParseError("apply missing method name on line %d" % line) reader.raise_parse_error("apply missing method name")
block = _ApplyBlock(suffix, line, block_body) block = _ApplyBlock(suffix, line, block_body)
elif operator == "block": elif operator == "block":
if not suffix: if not suffix:
raise ParseError("block missing name on line %d" % line) reader.raise_parse_error("block missing name")
block = _NamedBlock(suffix, block_body, template, line) block = _NamedBlock(suffix, block_body, template, line)
else: else:
block = _ControlBlock(contents, line, block_body) block = _ControlBlock(contents, line, block_body)
@ -857,9 +966,10 @@ def _parse(reader, template, in_block=None, in_loop=None):
elif operator in ("break", "continue"): elif operator in ("break", "continue"):
if not in_loop: if not in_loop:
raise ParseError("%s outside %s block" % (operator, set(["for", "while"]))) reader.raise_parse_error("%s outside %s block" %
(operator, set(["for", "while"])))
body.chunks.append(_Statement(contents, line)) body.chunks.append(_Statement(contents, line))
continue continue
else: else:
raise ParseError("unknown operator: %r" % operator) reader.raise_parse_error("unknown operator: %r" % operator)

View file

@ -47,6 +47,11 @@ try:
except ImportError: except ImportError:
from io import StringIO # py3 from io import StringIO # py3
try:
from collections.abc import Generator as GeneratorType # py35+
except ImportError:
from types import GeneratorType
# Tornado's own test suite requires the updated unittest module # Tornado's own test suite requires the updated unittest module
# (either py27+ or unittest2) so tornado.test.util enforces # (either py27+ or unittest2) so tornado.test.util enforces
# this requirement, but for other users of tornado.testing we want # this requirement, but for other users of tornado.testing we want
@ -118,7 +123,7 @@ class _TestMethodWrapper(object):
def __call__(self, *args, **kwargs): def __call__(self, *args, **kwargs):
result = self.orig_method(*args, **kwargs) result = self.orig_method(*args, **kwargs)
if isinstance(result, types.GeneratorType): if isinstance(result, GeneratorType):
raise TypeError("Generator test methods should be decorated with " raise TypeError("Generator test methods should be decorated with "
"tornado.testing.gen_test") "tornado.testing.gen_test")
elif result is not None: elif result is not None:
@ -331,20 +336,29 @@ class AsyncHTTPTestCase(AsyncTestCase):
Tests will typically use the provided ``self.http_client`` to fetch Tests will typically use the provided ``self.http_client`` to fetch
URLs from this server. URLs from this server.
Example:: Example, assuming the "Hello, world" example from the user guide is in
``hello.py``::
class MyHTTPTest(AsyncHTTPTestCase): import hello
class TestHelloApp(AsyncHTTPTestCase):
def get_app(self): def get_app(self):
return Application([('/', MyHandler)...]) return hello.make_app()
def test_homepage(self): def test_homepage(self):
# The following two lines are equivalent to response = self.fetch('/')
# response = self.fetch('/') self.assertEqual(response.code, 200)
# but are shown in full here to demonstrate explicit use self.assertEqual(response.body, 'Hello, world')
# of self.stop and self.wait.
self.http_client.fetch(self.get_url('/'), self.stop) That call to ``self.fetch()`` is equivalent to ::
response = self.wait()
# test contents of response self.http_client.fetch(self.get_url('/'), self.stop)
response = self.wait()
which illustrates how AsyncTestCase can turn an asynchronous operation,
like ``http_client.fetch()``, into a synchronous operation. If you need
to do other asynchronous operations in tests, you'll probably need to use
``stop()`` and ``wait()`` yourself.
""" """
def setUp(self): def setUp(self):
super(AsyncHTTPTestCase, self).setUp() super(AsyncHTTPTestCase, self).setUp()
@ -485,7 +499,7 @@ def gen_test(func=None, timeout=None):
@functools.wraps(f) @functools.wraps(f)
def pre_coroutine(self, *args, **kwargs): def pre_coroutine(self, *args, **kwargs):
result = f(self, *args, **kwargs) result = f(self, *args, **kwargs)
if isinstance(result, types.GeneratorType): if isinstance(result, GeneratorType):
self._test_generator = result self._test_generator = result
else: else:
self._test_generator = None self._test_generator = None
@ -575,10 +589,16 @@ class ExpectLog(logging.Filter):
Useful to make tests of error conditions less noisy, while still Useful to make tests of error conditions less noisy, while still
leaving unexpected log entries visible. *Not thread safe.* leaving unexpected log entries visible. *Not thread safe.*
The attribute ``logged_stack`` is set to true if any exception
stack trace was logged.
Usage:: Usage::
with ExpectLog('tornado.application', "Uncaught exception"): with ExpectLog('tornado.application', "Uncaught exception"):
error_response = self.fetch("/some_page") error_response = self.fetch("/some_page")
.. versionchanged:: 4.3
Added the ``logged_stack`` attribute.
""" """
def __init__(self, logger, regex, required=True): def __init__(self, logger, regex, required=True):
"""Constructs an ExpectLog context manager. """Constructs an ExpectLog context manager.
@ -596,8 +616,11 @@ class ExpectLog(logging.Filter):
self.regex = re.compile(regex) self.regex = re.compile(regex)
self.required = required self.required = required
self.matched = False self.matched = False
self.logged_stack = False
def filter(self, record): def filter(self, record):
if record.exc_info:
self.logged_stack = True
message = record.getMessage() message = record.getMessage()
if self.regex.match(message): if self.regex.match(message):
self.matched = True self.matched = True
@ -606,6 +629,7 @@ class ExpectLog(logging.Filter):
def __enter__(self): def __enter__(self):
self.logger.addFilter(self) self.logger.addFilter(self)
return self
def __exit__(self, typ, value, tb): def __exit__(self, typ, value, tb):
self.logger.removeFilter(self) self.logger.removeFilter(self)

View file

@ -13,7 +13,6 @@ and `.Resolver`.
from __future__ import absolute_import, division, print_function, with_statement from __future__ import absolute_import, division, print_function, with_statement
import array import array
import inspect
import os import os
import sys import sys
import zlib import zlib
@ -24,6 +23,13 @@ try:
except NameError: except NameError:
xrange = range # py3 xrange = range # py3
# inspect.getargspec() raises DeprecationWarnings in Python 3.5.
# The two functions have compatible interfaces for the parts we need.
try:
from inspect import getfullargspec as getargspec # py3
except ImportError:
from inspect import getargspec # py2
class ObjectDict(dict): class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access. """Makes a dictionary behave like an object, with attribute-style access.
@ -284,7 +290,7 @@ class ArgReplacer(object):
def __init__(self, func, name): def __init__(self, func, name):
self.name = name self.name = name
try: try:
self.arg_pos = inspect.getargspec(func).args.index(self.name) self.arg_pos = getargspec(func).args.index(self.name)
except ValueError: except ValueError:
# Not a positional parameter # Not a positional parameter
self.arg_pos = None self.arg_pos = None

View file

@ -362,10 +362,8 @@ class RequestHandler(object):
else: else:
raise TypeError("Unsupported header value %r" % value) raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject # If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to # additional headers or split the request.
# prevent obviously erroneous values. if RequestHandler._INVALID_HEADER_CHAR_RE.search(value):
if (len(value) > 4000 or
RequestHandler._INVALID_HEADER_CHAR_RE.search(value)):
raise ValueError("Unsafe header value %r", value) raise ValueError("Unsafe header value %r", value)
return value return value
@ -841,8 +839,9 @@ class RequestHandler(object):
May be overridden by subclasses. By default returns a May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the directory-based loader on the given path, using the
``autoescape`` application setting. If a ``template_loader`` ``autoescape`` and ``template_whitespace`` application
application setting is supplied, uses that instead. settings. If a ``template_loader`` application setting is
supplied, uses that instead.
""" """
settings = self.application.settings settings = self.application.settings
if "template_loader" in settings: if "template_loader" in settings:
@ -852,6 +851,8 @@ class RequestHandler(object):
# autoescape=None means "no escaping", so we have to be sure # autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it. # to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"] kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs) return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None): def flush(self, include_footers=False, callback=None):
@ -1391,10 +1392,8 @@ class RequestHandler(object):
self.check_xsrf_cookie() self.check_xsrf_cookie()
result = self.prepare() result = self.prepare()
if is_future(result):
result = yield result
if result is not None: if result is not None:
raise TypeError("Expected None, got %r" % result) result = yield result
if self._prepared_future is not None: if self._prepared_future is not None:
# Tell the Application we've finished with prepare() # Tell the Application we've finished with prepare()
# and are ready for the body to arrive. # and are ready for the body to arrive.
@ -1414,10 +1413,8 @@ class RequestHandler(object):
method = getattr(self, self.request.method.lower()) method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs) result = method(*self.path_args, **self.path_kwargs)
if is_future(result):
result = yield result
if result is not None: if result is not None:
raise TypeError("Expected None, got %r" % result) result = yield result
if self._auto_finish and not self._finished: if self._auto_finish and not self._finished:
self.finish() self.finish()
except Exception as e: except Exception as e:
@ -2151,6 +2148,11 @@ class StaticFileHandler(RequestHandler):
the ``path`` argument to the get() method (different than the constructor the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details. argument above); see `URLSpec` for details.
To serve a file like ``index.html`` automatically when a directory is
requested, set ``static_handler_args=dict(default_filename="index.html")``
in your application settings, or add ``default_filename`` as an initializer
argument for your ``StaticFileHandler``.
To maximize the effectiveness of browser caching, this class supports To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely. is given, we instruct the browser to cache this file indefinitely.
@ -2162,8 +2164,7 @@ class StaticFileHandler(RequestHandler):
a dedicated static file server (such as nginx or Apache). We support a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in some browsers require this functionality to be present to seek in
HTML5 audio or video), but this handler should not be used with HTML5 audio or video).
files that are too large to fit comfortably in memory.
**Subclassing notes** **Subclassing notes**
@ -2379,9 +2380,13 @@ class StaticFileHandler(RequestHandler):
.. versionadded:: 3.1 .. versionadded:: 3.1
""" """
root = os.path.abspath(root) # os.path.abspath strips a trailing /.
# os.path.abspath strips a trailing / # We must add it back to `root` so that we only match files
# it needs to be temporarily added back for requests to root/ # in a directory named `root` instead of files starting with
# that prefix.
root = os.path.abspath(root) + os.path.sep
# The trailing slash also needs to be temporarily added back
# the requested path so a request to root/ will match.
if not (absolute_path + os.path.sep).startswith(root): if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory", raise HTTPError(403, "%s is not in root static directory",
self.path) self.path)
@ -2493,7 +2498,19 @@ class StaticFileHandler(RequestHandler):
.. versionadded:: 3.1 .. versionadded:: 3.1
""" """
mime_type, encoding = mimetypes.guess_type(self.absolute_path) mime_type, encoding = mimetypes.guess_type(self.absolute_path)
return mime_type # per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
def set_extra_headers(self, path): def set_extra_headers(self, path):
"""For subclass to add extra headers to the response""" """For subclass to add extra headers to the response"""
@ -2644,7 +2661,16 @@ class GZipContentEncoding(OutputTransform):
CONTENT_TYPES = set(["application/javascript", "application/x-javascript", CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml", "application/xml", "application/atom+xml",
"application/json", "application/xhtml+xml"]) "application/json", "application/xhtml+xml"])
MIN_LENGTH = 5 # Python's GzipFile defaults to level 9, while most other gzip
# tools (including gzip itself) default to 6, which is probably a
# better CPU/size tradeoff.
GZIP_LEVEL = 6
# Responses that are too short are unlikely to benefit from gzipping
# after considering the "Content-Encoding: gzip" header and the header
# inside the gzip encoding.
# Note that responses written in multiple chunks will be compressed
# regardless of size.
MIN_LENGTH = 1024
def __init__(self, request): def __init__(self, request):
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "") self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
@ -2665,7 +2691,8 @@ class GZipContentEncoding(OutputTransform):
if self._gzipping: if self._gzipping:
headers["Content-Encoding"] = "gzip" headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO() self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value) self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
compresslevel=self.GZIP_LEVEL)
chunk = self.transform_chunk(chunk, finishing) chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers: if "Content-Length" in headers:
# The original content length is no longer correct. # The original content length is no longer correct.

View file

@ -444,7 +444,8 @@ class _PerMessageDeflateCompressor(object):
self._compressor = None self._compressor = None
def _create_compressor(self): def _create_compressor(self):
return zlib.compressobj(-1, zlib.DEFLATED, -self._max_wbits) return zlib.compressobj(tornado.web.GZipContentEncoding.GZIP_LEVEL,
zlib.DEFLATED, -self._max_wbits)
def compress(self, data): def compress(self, data):
compressor = self._compressor or self._create_compressor() compressor = self._compressor or self._create_compressor()