Update Tornado Web Server 4.5.1 (79b2683) → 5.0.1 (35a538f).

This commit is contained in:
Prinz23 2018-03-27 17:13:58 +01:00 committed by JackDandy
parent 11b05e3699
commit e8ade6ffcf
46 changed files with 2165 additions and 1173 deletions

View file

@ -18,6 +18,7 @@
* Update scandir 1.3 to 1.6 (c3592ee) * Update scandir 1.3 to 1.6 (c3592ee)
* Update SimpleJSON library 3.10.0 (c52efea) to 3.13.2 (6ffddbe) * Update SimpleJSON library 3.10.0 (c52efea) to 3.13.2 (6ffddbe)
* Update Six compatibility library 1.10.0 (r433) to 1.11.0 (68112f3) * Update Six compatibility library 1.10.0 (r433) to 1.11.0 (68112f3)
* Update Tornado Web Server 4.5.1 (79b2683) to 5.0.1 (35a538f)
[develop changelog] [develop changelog]

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -25,5 +24,5 @@ from __future__ import absolute_import, division, print_function
# is zero for an official release, positive for a development branch, # is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version # or negative for a release candidate or beta (after the base version
# number has been incremented) # number has been incremented)
version = "4.5.1" version = "5.1.dev1"
version_info = (4, 5, 1, 0) version_info = (5, 1, 0, -100)

View file

@ -1,5 +1,4 @@
#!/usr/bin/env python # -*- coding: utf-8 -*-
# coding: utf-8
# #
# Copyright 2012 Facebook # Copyright 2012 Facebook
# #

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -74,8 +73,11 @@ import hashlib
import hmac import hmac
import time import time
import uuid import uuid
import warnings
from tornado.concurrent import TracebackFuture, return_future, chain_future from tornado.concurrent import (Future, return_future, chain_future,
future_set_exc_info,
future_set_result_unless_cancelled)
from tornado import gen from tornado import gen
from tornado import httpclient from tornado import httpclient
from tornado import escape from tornado import escape
@ -112,14 +114,19 @@ def _auth_return_future(f):
Note that when using this decorator the ``callback`` parameter Note that when using this decorator the ``callback`` parameter
inside the function will actually be a future. inside the function will actually be a future.
.. deprecated:: 5.1
Will be removed in 6.0.
""" """
replacer = ArgReplacer(f, 'callback') replacer = ArgReplacer(f, 'callback')
@functools.wraps(f) @functools.wraps(f)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
future = TracebackFuture() future = Future()
callback, args, kwargs = replacer.replace(future, args, kwargs) callback, args, kwargs = replacer.replace(future, args, kwargs)
if callback is not None: if callback is not None:
warnings.warn("callback arguments are deprecated, use the returned Future instead",
DeprecationWarning)
future.add_done_callback( future.add_done_callback(
functools.partial(_auth_future_to_callback, callback)) functools.partial(_auth_future_to_callback, callback))
@ -127,7 +134,7 @@ def _auth_return_future(f):
if future.done(): if future.done():
return False return False
else: else:
future.set_exc_info((typ, value, tb)) future_set_exc_info(future, (typ, value, tb))
return True return True
with ExceptionStackContext(handle_exception): with ExceptionStackContext(handle_exception):
f(*args, **kwargs) f(*args, **kwargs)
@ -161,6 +168,11 @@ class OpenIdMixin(object):
not strictly necessary as this method is synchronous, not strictly necessary as this method is synchronous,
but they are supplied for consistency with but they are supplied for consistency with
`OAuthMixin.authorize_redirect`. `OAuthMixin.authorize_redirect`.
.. deprecated:: 5.1
The ``callback`` argument and returned awaitable will be removed
in Tornado 6.0; this will be an ordinary synchronous function.
""" """
callback_uri = callback_uri or self.request.uri callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs) args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
@ -178,6 +190,11 @@ class OpenIdMixin(object):
is present and `authenticate_redirect` if it is not). is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie. The result of this method will generally be used to set a cookie.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
# Verify the OpenID response via direct request to the OP # Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items()) args = dict((k, v[-1]) for k, v in self.request.arguments.items())
@ -295,7 +312,7 @@ class OpenIdMixin(object):
claimed_id = self.get_argument("openid.claimed_id", None) claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id: if claimed_id:
user["claimed_id"] = claimed_id user["claimed_id"] = claimed_id
future.set_result(user) future_set_result_unless_cancelled(future, user)
def get_auth_http_client(self): def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests. """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
@ -328,25 +345,29 @@ class OAuthMixin(object):
"""Redirects the user to obtain OAuth authorization for this service. """Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For registered a callback URI with the third-party service. For
some services (including Friendfeed), you must use a some services, you must use a previously-registered callback
previously-registered callback URI and cannot specify a URI and cannot specify a callback via this method.
callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for subsequently used (and cleared) in `get_authenticated_user` for
security purposes. security purposes.
Note that this method is asynchronous, although it calls This method is asynchronous and must be called with ``await``
`.RequestHandler.finish` for you so it may not be necessary or ``yield`` (This is different from other ``auth*_redirect``
to pass a callback or use the `.Future` it returns. However, methods defined in this module). It calls
if this method is called from a function decorated with `.RequestHandler.finish` for you so you should not write any
`.gen.coroutine`, you must call it with ``yield`` to keep the other response after it returns.
response from being closed prematurely.
.. versionchanged:: 3.1 .. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`. compatibility with `.gen.coroutine`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback") raise Exception("This service does not support oauth_callback")
@ -380,6 +401,11 @@ class OAuthMixin(object):
requests to this service on behalf of the user. The dictionary will requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service also contain other fields such as ``name``, depending on the service
used. used.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
future = callback future = callback
request_key = escape.utf8(self.get_argument("oauth_token")) request_key = escape.utf8(self.get_argument("oauth_token"))
@ -390,7 +416,8 @@ class OAuthMixin(object):
"Missing OAuth request token cookie")) "Missing OAuth request token cookie"))
return return
self.clear_cookie("_oauth_request_token") self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")] cookie_key, cookie_secret = [
base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key: if cookie_key != request_key:
future.set_exception(AuthError( future.set_exception(AuthError(
"Request token does not match cookie")) "Request token does not match cookie"))
@ -477,7 +504,9 @@ class OAuthMixin(object):
return return
access_token = _oauth_parse_response(response.body) access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback( fut = self._oauth_get_user_future(access_token)
fut = gen.convert_yielded(fut)
fut.add_done_callback(
functools.partial(self._on_oauth_get_user, access_token, future)) functools.partial(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self): def _oauth_consumer_token(self):
@ -502,7 +531,18 @@ class OAuthMixin(object):
For backwards compatibility, the callback-based ``_oauth_get_user`` For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported. method is also supported.
.. versionchanged:: 5.1
Subclasses may also define this method with ``async def``.
.. deprecated:: 5.1
The ``_oauth_get_user`` fallback is deprecated and support for it
will be removed in 6.0.
""" """
warnings.warn("_oauth_get_user is deprecated, override _oauth_get_user_future instead",
DeprecationWarning)
# By default, call the old-style _oauth_get_user, but new code # By default, call the old-style _oauth_get_user, but new code
# should override this method instead. # should override this method instead.
self._oauth_get_user(access_token, callback) self._oauth_get_user(access_token, callback)
@ -519,7 +559,7 @@ class OAuthMixin(object):
future.set_exception(AuthError("Error getting user")) future.set_exception(AuthError("Error getting user"))
return return
user["access_token"] = access_token user["access_token"] = access_token
future.set_result(user) future_set_result_unless_cancelled(future, user)
def _oauth_request_parameters(self, url, access_token, parameters={}, def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"): method="GET"):
@ -586,6 +626,11 @@ class OAuth2Mixin(object):
not strictly necessary as this method is synchronous, not strictly necessary as this method is synchronous,
but they are supplied for consistency with but they are supplied for consistency with
`OAuthMixin.authorize_redirect`. `OAuthMixin.authorize_redirect`.
.. deprecated:: 5.1
The ``callback`` argument and returned awaitable will be removed
in Tornado 6.0; this will be an ordinary synchronous function.
""" """
args = { args = {
"redirect_uri": redirect_uri, "redirect_uri": redirect_uri,
@ -646,6 +691,11 @@ class OAuth2Mixin(object):
:hide: :hide:
.. versionadded:: 4.3 .. versionadded:: 4.3
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
all_args = {} all_args = {}
if access_token: if access_token:
@ -668,7 +718,7 @@ class OAuth2Mixin(object):
(response.error, response.request.url))) (response.error, response.request.url)))
return return
future.set_result(escape.json_decode(response.body)) future_set_result_unless_cancelled(future, escape.json_decode(response.body))
def get_auth_http_client(self): def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests. """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
@ -732,6 +782,11 @@ class TwitterMixin(OAuthMixin):
.. versionchanged:: 3.1 .. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`. compatibility with `.gen.coroutine`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
http = self.get_auth_http_client() http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri), http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
@ -779,6 +834,10 @@ class TwitterMixin(OAuthMixin):
.. testoutput:: .. testoutput::
:hide: :hide:
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
if path.startswith('http:') or path.startswith('https:'): if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the # Raw urls are useful for e.g. search which doesn't follow the
@ -811,7 +870,7 @@ class TwitterMixin(OAuthMixin):
"Error response %s fetching %s" % (response.error, "Error response %s fetching %s" % (response.error,
response.request.url))) response.request.url)))
return return
future.set_result(escape.json_decode(response.body)) future_set_result_unless_cancelled(future, escape.json_decode(response.body))
def _oauth_consumer_token(self): def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth") self.require_setting("twitter_consumer_key", "Twitter OAuth")
@ -848,8 +907,8 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
.. versionadded:: 3.2 .. versionadded:: 3.2
""" """
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth" _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/v2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token" _OAUTH_ACCESS_TOKEN_URL = "https://www.googleapis.com/oauth2/v4/token"
_OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo" _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
_OAUTH_NO_CALLBACKS = False _OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = 'google_oauth' _OAUTH_SETTINGS_KEY = 'google_oauth'
@ -894,7 +953,11 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
.. testoutput:: .. testoutput::
:hide: :hide:
""" .. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" # noqa: E501
http = self.get_auth_http_client() http = self.get_auth_http_client()
body = urllib_parse.urlencode({ body = urllib_parse.urlencode({
"redirect_uri": redirect_uri, "redirect_uri": redirect_uri,
@ -906,7 +969,9 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
http.fetch(self._OAUTH_ACCESS_TOKEN_URL, http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
functools.partial(self._on_access_token, callback), functools.partial(self._on_access_token, callback),
method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body) method="POST",
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body=body)
def _on_access_token(self, future, response): def _on_access_token(self, future, response):
"""Callback function for the exchange to the access token.""" """Callback function for the exchange to the access token."""
@ -915,7 +980,7 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
return return
args = escape.json_decode(response.body) args = escape.json_decode(response.body)
future.set_result(args) future_set_result_unless_cancelled(future, args)
class FacebookGraphMixin(OAuth2Mixin): class FacebookGraphMixin(OAuth2Mixin):
@ -963,11 +1028,17 @@ class FacebookGraphMixin(OAuth2Mixin):
Tornado it will change from a string to an integer. Tornado it will change from a string to an integer.
* ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``, * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``,
``link``, plus any fields named in the ``extra_fields`` argument. These ``link``, plus any fields named in the ``extra_fields`` argument. These
fields are copied from the Facebook graph API `user object <https://developers.facebook.com/docs/graph-api/reference/user>`_ fields are copied from the Facebook graph API
`user object <https://developers.facebook.com/docs/graph-api/reference/user>`_
.. versionchanged:: 4.5 .. versionchanged:: 4.5
The ``session_expires`` field was updated to support changes made to the The ``session_expires`` field was updated to support changes made to the
Facebook API in March 2017. Facebook API in March 2017.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
http = self.get_auth_http_client() http = self.get_auth_http_client()
args = { args = {
@ -986,6 +1057,7 @@ class FacebookGraphMixin(OAuth2Mixin):
functools.partial(self._on_access_token, redirect_uri, client_id, functools.partial(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields)) client_secret, callback, fields))
@gen.coroutine
def _on_access_token(self, redirect_uri, client_id, client_secret, def _on_access_token(self, redirect_uri, client_id, client_secret,
future, fields, response): future, fields, response):
if response.error: if response.error:
@ -998,10 +1070,8 @@ class FacebookGraphMixin(OAuth2Mixin):
"expires_in": args.get("expires_in") "expires_in": args.get("expires_in")
} }
self.facebook_request( user = yield self.facebook_request(
path="/me", path="/me",
callback=functools.partial(
self._on_get_user_info, future, session, fields),
access_token=session["access_token"], access_token=session["access_token"],
appsecret_proof=hmac.new(key=client_secret.encode('utf8'), appsecret_proof=hmac.new(key=client_secret.encode('utf8'),
msg=session["access_token"].encode('utf8'), msg=session["access_token"].encode('utf8'),
@ -1009,9 +1079,8 @@ class FacebookGraphMixin(OAuth2Mixin):
fields=",".join(fields) fields=",".join(fields)
) )
def _on_get_user_info(self, future, session, fields, user):
if user is None: if user is None:
future.set_result(None) future_set_result_unless_cancelled(future, None)
return return
fieldmap = {} fieldmap = {}
@ -1024,7 +1093,7 @@ class FacebookGraphMixin(OAuth2Mixin):
# This should change in Tornado 5.0. # This should change in Tornado 5.0.
fieldmap.update({"access_token": session["access_token"], fieldmap.update({"access_token": session["access_token"],
"session_expires": str(session.get("expires_in"))}) "session_expires": str(session.get("expires_in"))})
future.set_result(fieldmap) future_set_result_unless_cancelled(future, fieldmap)
@_auth_return_future @_auth_return_future
def facebook_request(self, path, callback, access_token=None, def facebook_request(self, path, callback, access_token=None,
@ -1045,7 +1114,7 @@ class FacebookGraphMixin(OAuth2Mixin):
Example usage: Example usage:
..testcode:: .. testcode::
class MainHandler(tornado.web.RequestHandler, class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin): tornado.auth.FacebookGraphMixin):
@ -1075,6 +1144,11 @@ class FacebookGraphMixin(OAuth2Mixin):
.. versionchanged:: 3.1 .. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``. Added the ability to override ``self._FACEBOOK_BASE_URL``.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
url = self._FACEBOOK_BASE_URL + path url = self._FACEBOOK_BASE_URL + path
# Thanks to the _auth_return_future decorator, our "callback" # Thanks to the _auth_return_future decorator, our "callback"

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -63,12 +62,11 @@ import sys
# file.py gets added to the path, which can cause confusion as imports # file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import. # may become relative in spite of the future import.
# #
# We address the former problem by setting the $PYTHONPATH environment # We address the former problem by reconstructing the original command
# variable before re-execution so the new process will see the correct # line (Python >= 3.4) or by setting the $PYTHONPATH environment
# path. We attempt to address the latter problem when tornado.autoreload # variable (Python < 3.4) before re-execution so the new process will
# is run as __main__, although we can't fix the general case because # see the correct path. We attempt to address the latter problem when
# we cannot reliably reconstruct the original command line # tornado.autoreload is run as __main__.
# (http://bugs.python.org/issue14208).
if __name__ == "__main__": if __name__ == "__main__":
# This sys.path manipulation must come before our imports (as much # This sys.path manipulation must come before our imports (as much
@ -111,13 +109,13 @@ _reload_attempted = False
_io_loops = weakref.WeakKeyDictionary() # type: ignore _io_loops = weakref.WeakKeyDictionary() # type: ignore
def start(io_loop=None, check_time=500): def start(check_time=500):
"""Begins watching source files for changes. """Begins watching source files for changes.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
io_loop = io_loop or ioloop.IOLoop.current() io_loop = ioloop.IOLoop.current()
if io_loop in _io_loops: if io_loop in _io_loops:
return return
_io_loops[io_loop] = True _io_loops[io_loop] = True
@ -125,7 +123,7 @@ def start(io_loop=None, check_time=500):
gen_log.warning("tornado.autoreload started more than once in the same process") gen_log.warning("tornado.autoreload started more than once in the same process")
modify_times = {} modify_times = {}
callback = functools.partial(_reload_on_update, modify_times) callback = functools.partial(_reload_on_update, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop) scheduler = ioloop.PeriodicCallback(callback, check_time)
scheduler.start() scheduler.start()
@ -137,7 +135,7 @@ def wait():
the command-line interface in `main`) the command-line interface in `main`)
""" """
io_loop = ioloop.IOLoop() io_loop = ioloop.IOLoop()
start(io_loop) io_loop.add_callback(start)
io_loop.start() io_loop.start()
@ -209,21 +207,29 @@ def _reload():
# ioloop.set_blocking_log_threshold so it doesn't fire # ioloop.set_blocking_log_threshold so it doesn't fire
# after the exec. # after the exec.
signal.setitimer(signal.ITIMER_REAL, 0, 0) signal.setitimer(signal.ITIMER_REAL, 0, 0)
# sys.path fixes: see comments at top of file. If sys.path[0] is an empty # sys.path fixes: see comments at top of file. If __main__.__spec__
# string, we were (probably) invoked with -m and the effective path # exists, we were invoked with -m and the effective path is about to
# is about to change on re-exec. Add the current directory to $PYTHONPATH # change on re-exec. Reconstruct the original command line to
# to ensure that the new process sees the same path we did. # ensure that the new process sees the same path we did. If
path_prefix = '.' + os.pathsep # __spec__ is not available (Python < 3.4), check instead if
if (sys.path[0] == '' and # sys.path[0] is an empty string and add the current directory to
not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): # $PYTHONPATH.
os.environ["PYTHONPATH"] = (path_prefix + spec = getattr(sys.modules['__main__'], '__spec__', None)
os.environ.get("PYTHONPATH", "")) if spec:
argv = ['-m', spec.name] + sys.argv[1:]
else:
argv = sys.argv
path_prefix = '.' + os.pathsep
if (sys.path[0] == '' and
not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
os.environ["PYTHONPATH"] = (path_prefix +
os.environ.get("PYTHONPATH", ""))
if not _has_execv: if not _has_execv:
subprocess.Popen([sys.executable] + sys.argv) subprocess.Popen([sys.executable] + argv)
sys.exit(0) sys.exit(0)
else: else:
try: try:
os.execv(sys.executable, [sys.executable] + sys.argv) os.execv(sys.executable, [sys.executable] + argv)
except OSError: except OSError:
# Mac OS X versions prior to 10.6 do not support execv in # Mac OS X versions prior to 10.6 do not support execv in
# a process that contains multiple threads. Instead of # a process that contains multiple threads. Instead of
@ -236,8 +242,7 @@ def _reload():
# Unfortunately the errno returned in this case does not # Unfortunately the errno returned in this case does not
# appear to be consistent, so we can't easily check for # appear to be consistent, so we can't easily check for
# this error specifically. # this error specifically.
os.spawnv(os.P_NOWAIT, sys.executable, os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + argv)
[sys.executable] + sys.argv)
# At this point the IOLoop has been closed and finally # At this point the IOLoop has been closed and finally
# blocks will experience errors if we allow the stack to # blocks will experience errors if we allow the stack to
# unwind, so just exit uncleanly. # unwind, so just exit uncleanly.

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2012 Facebook # Copyright 2012 Facebook
# #
@ -13,13 +12,19 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
"""Utilities for working with threads and ``Futures``. """Utilities for working with ``Future`` objects.
``Futures`` are a pattern for concurrent programming introduced in ``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package. This package defines Python 3.2 in the `concurrent.futures` package, and also adopted (in a
a mostly-compatible `Future` class designed for use from coroutines, slightly different form) in Python 3.4's `asyncio` package. This
as well as some utility functions for interacting with the package defines a ``Future`` class that is an alias for `asyncio.Future`
`concurrent.futures` package. when available, and a compatible implementation for older versions of
Python. It also includes some utility functions for interacting with
``Future`` objects.
While this package is an important part of Tornado's internal
implementation, applications rarely need to interact with it
directly.
""" """
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
@ -28,6 +33,7 @@ import platform
import textwrap import textwrap
import traceback import traceback
import sys import sys
import warnings
from tornado.log import app_log from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap from tornado.stack_context import ExceptionStackContext, wrap
@ -38,6 +44,11 @@ try:
except ImportError: except ImportError:
futures = None futures = None
try:
import asyncio
except ImportError:
asyncio = None
try: try:
import typing import typing
except ImportError: except ImportError:
@ -138,16 +149,17 @@ class Future(object):
Tornado they are normally used with `.IOLoop.add_future` or by Tornado they are normally used with `.IOLoop.add_future` or by
yielding them in a `.gen.coroutine`. yielding them in a `.gen.coroutine`.
`tornado.concurrent.Future` is similar to `tornado.concurrent.Future` is an alias for `asyncio.Future` when
`concurrent.futures.Future`, but not thread-safe (and therefore that package is available (Python 3.4+). Unlike
faster for use with single-threaded event loops). `concurrent.futures.Future`, the ``Futures`` used by Tornado and
`asyncio` are not thread-safe (and therefore faster for use with
single-threaded event loops).
In addition to ``exception`` and ``set_exception``, methods ``exc_info`` In addition to ``exception`` and ``set_exception``, Tornado's
and ``set_exc_info`` are supported to capture tracebacks in Python 2. ``Future`` implementation supports storing an ``exc_info`` triple
The traceback is automatically available in Python 3, but in the to support better tracebacks on Python 2. To set an ``exc_info``
Python 2 futures backport this information is discarded. triple, use `future_set_exc_info`, and to retrieve one, call
This functionality was previously available in a separate class `result()` (which will raise it).
``TracebackFuture``, which is now a deprecated alias for this class.
.. versionchanged:: 4.0 .. versionchanged:: 4.0
`tornado.concurrent.Future` is always a thread-unsafe ``Future`` `tornado.concurrent.Future` is always a thread-unsafe ``Future``
@ -164,6 +176,17 @@ class Future(object):
where it results in undesired logging it may be necessary to where it results in undesired logging it may be necessary to
suppress the logging by ensuring that the exception is observed: suppress the logging by ensuring that the exception is observed:
``f.add_done_callback(lambda f: f.exception())``. ``f.add_done_callback(lambda f: f.exception())``.
.. versionchanged:: 5.0
This class was previoiusly available under the name
``TracebackFuture``. This name, which was deprecated since
version 4.0, has been removed. When `asyncio` is available
``tornado.concurrent.Future`` is now an alias for
`asyncio.Future`. Like `asyncio.Future`, callbacks are now
always scheduled on the `.IOLoop` and are never run
synchronously.
""" """
def __init__(self): def __init__(self):
self._done = False self._done = False
@ -265,7 +288,8 @@ class Future(object):
`add_done_callback` directly. `add_done_callback` directly.
""" """
if self._done: if self._done:
fn(self) from tornado.ioloop import IOLoop
IOLoop.current().add_callback(fn, self)
else: else:
self._callbacks.append(fn) self._callbacks.append(fn)
@ -320,13 +344,12 @@ class Future(object):
def _set_done(self): def _set_done(self):
self._done = True self._done = True
for cb in self._callbacks: if self._callbacks:
try: from tornado.ioloop import IOLoop
cb(self) loop = IOLoop.current()
except Exception: for cb in self._callbacks:
app_log.exception('Exception in callback %r for %r', loop.add_callback(cb, self)
cb, self) self._callbacks = None
self._callbacks = None
# On Python 3.3 or older, objects with a destructor part of a reference # On Python 3.3 or older, objects with a destructor part of a reference
# cycle are never destroyed. It's no longer the case on Python 3.4 thanks to # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
@ -344,7 +367,8 @@ class Future(object):
self, ''.join(tb).rstrip()) self, ''.join(tb).rstrip())
TracebackFuture = Future if asyncio is not None:
Future = asyncio.Future # noqa
if futures is None: if futures is None:
FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]] FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]]
@ -358,11 +382,11 @@ def is_future(x):
class DummyExecutor(object): class DummyExecutor(object):
def submit(self, fn, *args, **kwargs): def submit(self, fn, *args, **kwargs):
future = TracebackFuture() future = Future()
try: try:
future.set_result(fn(*args, **kwargs)) future_set_result_unless_cancelled(future, fn(*args, **kwargs))
except Exception: except Exception:
future.set_exc_info(sys.exc_info()) future_set_exc_info(future, sys.exc_info())
return future return future
def shutdown(self, wait=True): def shutdown(self, wait=True):
@ -378,29 +402,53 @@ def run_on_executor(*args, **kwargs):
The decorated method may be called with a ``callback`` keyword The decorated method may be called with a ``callback`` keyword
argument and returns a future. argument and returns a future.
The `.IOLoop` and executor to be used are determined by the ``io_loop`` The executor to be used is determined by the ``executor``
and ``executor`` attributes of ``self``. To use different attributes, attributes of ``self``. To use a different attribute name, pass a
pass keyword arguments to the decorator:: keyword argument to the decorator::
@run_on_executor(executor='_thread_pool') @run_on_executor(executor='_thread_pool')
def foo(self): def foo(self):
pass pass
This decorator should not be confused with the similarly-named
`.IOLoop.run_in_executor`. In general, using ``run_in_executor``
when *calling* a blocking method is recommended instead of using
this decorator when *defining* a method. If compatibility with older
versions of Tornado is required, consider defining an executor
and using ``executor.submit()`` at the call site.
.. versionchanged:: 4.2 .. versionchanged:: 4.2
Added keyword arguments to use alternative attributes. Added keyword arguments to use alternative attributes.
.. versionchanged:: 5.0
Always uses the current IOLoop instead of ``self.io_loop``.
.. versionchanged:: 5.1
Returns a `.Future` compatible with ``await`` instead of a
`concurrent.futures.Future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in
6.0. The decorator itself is discouraged in new code but will
not be removed in 6.0.
""" """
def run_on_executor_decorator(fn): def run_on_executor_decorator(fn):
executor = kwargs.get("executor", "executor") executor = kwargs.get("executor", "executor")
io_loop = kwargs.get("io_loop", "io_loop")
@functools.wraps(fn) @functools.wraps(fn)
def wrapper(self, *args, **kwargs): def wrapper(self, *args, **kwargs):
callback = kwargs.pop("callback", None) callback = kwargs.pop("callback", None)
future = getattr(self, executor).submit(fn, self, *args, **kwargs) async_future = Future()
conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs)
chain_future(conc_future, async_future)
if callback: if callback:
getattr(self, io_loop).add_future( warnings.warn("callback arguments are deprecated, use the returned Future instead",
future, lambda future: callback(future.result())) DeprecationWarning)
return future from tornado.ioloop import IOLoop
IOLoop.current().add_future(
async_future, lambda future: callback(future.result()))
return async_future
return wrapper return wrapper
if args and kwargs: if args and kwargs:
raise ValueError("cannot combine positional and keyword args") raise ValueError("cannot combine positional and keyword args")
@ -418,6 +466,10 @@ def return_future(f):
"""Decorator to make a function that returns via callback return a """Decorator to make a function that returns via callback return a
`Future`. `Future`.
This decorator was provided to ease the transition from
callback-oriented code to coroutines. It is not recommended for
new code.
The wrapped function should take a ``callback`` keyword argument The wrapped function should take a ``callback`` keyword argument
and invoke it with one argument when it has finished. To signal failure, and invoke it with one argument when it has finished. To signal failure,
the function can simply raise an exception (which will be the function can simply raise an exception (which will be
@ -425,7 +477,7 @@ def return_future(f):
From the caller's perspective, the callback argument is optional. From the caller's perspective, the callback argument is optional.
If one is given, it will be invoked when the function is complete If one is given, it will be invoked when the function is complete
with `Future.result()` as an argument. If the function fails, the with ``Future.result()`` as an argument. If the function fails, the
callback will not be run and an exception will be raised into the callback will not be run and an exception will be raised into the
surrounding `.StackContext`. surrounding `.StackContext`.
@ -452,18 +504,28 @@ def return_future(f):
Note that ``@return_future`` and ``@gen.engine`` can be applied to the Note that ``@return_future`` and ``@gen.engine`` can be applied to the
same function, provided ``@return_future`` appears first. However, same function, provided ``@return_future`` appears first. However,
consider using ``@gen.coroutine`` instead of this combination. consider using ``@gen.coroutine`` instead of this combination.
.. versionchanged:: 5.1
Now raises a `.DeprecationWarning` if a callback argument is passed to
the decorated function and deprecation warnings are enabled.
.. deprecated:: 5.1
New code should use coroutines directly instead of wrapping
callback-based code with this decorator.
""" """
replacer = ArgReplacer(f, 'callback') replacer = ArgReplacer(f, 'callback')
@functools.wraps(f) @functools.wraps(f)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
future = TracebackFuture() future = Future()
callback, args, kwargs = replacer.replace( callback, args, kwargs = replacer.replace(
lambda value=_NO_RESULT: future.set_result(value), lambda value=_NO_RESULT: future_set_result_unless_cancelled(future, value),
args, kwargs) args, kwargs)
def handle_error(typ, value, tb): def handle_error(typ, value, tb):
future.set_exc_info((typ, value, tb)) future_set_exc_info(future, (typ, value, tb))
return True return True
exc_info = None exc_info = None
with ExceptionStackContext(handle_error): with ExceptionStackContext(handle_error):
@ -489,13 +551,16 @@ def return_future(f):
# immediate exception, and again when the future resolves and # immediate exception, and again when the future resolves and
# the callback triggers its exception by calling future.result()). # the callback triggers its exception by calling future.result()).
if callback is not None: if callback is not None:
warnings.warn("callback arguments are deprecated, use the returned Future instead",
DeprecationWarning)
def run_callback(future): def run_callback(future):
result = future.result() result = future.result()
if result is _NO_RESULT: if result is _NO_RESULT:
callback() callback()
else: else:
callback(future.result()) callback(future.result())
future.add_done_callback(wrap(run_callback)) future_add_done_callback(future, wrap(run_callback))
return future return future
return wrapper return wrapper
@ -505,17 +570,72 @@ def chain_future(a, b):
The result (success or failure) of ``a`` will be copied to ``b``, unless The result (success or failure) of ``a`` will be copied to ``b``, unless
``b`` has already been completed or cancelled by the time ``a`` finishes. ``b`` has already been completed or cancelled by the time ``a`` finishes.
.. versionchanged:: 5.0
Now accepts both Tornado/asyncio `Future` objects and
`concurrent.futures.Future`.
""" """
def copy(future): def copy(future):
assert future is a assert future is a
if b.done(): if b.done():
return return
if (isinstance(a, TracebackFuture) and if (hasattr(a, 'exc_info') and
isinstance(b, TracebackFuture) and
a.exc_info() is not None): a.exc_info() is not None):
b.set_exc_info(a.exc_info()) future_set_exc_info(b, a.exc_info())
elif a.exception() is not None: elif a.exception() is not None:
b.set_exception(a.exception()) b.set_exception(a.exception())
else: else:
b.set_result(a.result()) b.set_result(a.result())
a.add_done_callback(copy) if isinstance(a, Future):
future_add_done_callback(a, copy)
else:
# concurrent.futures.Future
from tornado.ioloop import IOLoop
IOLoop.current().add_future(a, copy)
def future_set_result_unless_cancelled(future, value):
"""Set the given ``value`` as the `Future`'s result, if not cancelled.
Avoids asyncio.InvalidStateError when calling set_result() on
a cancelled `asyncio.Future`.
.. versionadded:: 5.0
"""
if not future.cancelled():
future.set_result(value)
def future_set_exc_info(future, exc_info):
"""Set the given ``exc_info`` as the `Future`'s exception.
Understands both `asyncio.Future` and Tornado's extensions to
enable better tracebacks on Python 2.
.. versionadded:: 5.0
"""
if hasattr(future, 'set_exc_info'):
# Tornado's Future
future.set_exc_info(exc_info)
else:
# asyncio.Future
future.set_exception(exc_info[1])
def future_add_done_callback(future, callback):
"""Arrange to call ``callback`` when ``future`` is complete.
``callback`` is invoked with one argument, the ``future``.
If ``future`` is already done, ``callback`` is invoked immediately.
This may differ from the behavior of ``Future.add_done_callback``,
which makes no such guarantee.
.. versionadded:: 5.0
"""
if future.done():
callback(future)
else:
future.add_done_callback(callback)

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -37,8 +36,8 @@ curl_log = logging.getLogger('tornado.curl_httpclient')
class CurlAsyncHTTPClient(AsyncHTTPClient): class CurlAsyncHTTPClient(AsyncHTTPClient):
def initialize(self, io_loop, max_clients=10, defaults=None): def initialize(self, max_clients=10, defaults=None):
super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults) super(CurlAsyncHTTPClient, self).initialize(defaults=defaults)
self._multi = pycurl.CurlMulti() self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
@ -53,7 +52,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
# SOCKETFUNCTION. Mitigate the effects of such bugs by # SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests. # forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback( self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000, io_loop=io_loop) self._handle_force_timeout, 1000)
self._force_timeout_callback.start() self._force_timeout_callback.start()
# Work around a bug in libcurl 7.29.0: Some fields in the curl # Work around a bug in libcurl 7.29.0: Some fields in the curl
@ -74,6 +73,12 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
self._multi.close() self._multi.close()
super(CurlAsyncHTTPClient, self).close() super(CurlAsyncHTTPClient, self).close()
# Set below properties to None to reduce the reference count of current
# instance, because those properties hold some methods of current
# instance that will case circular reference.
self._force_timeout_callback = None
self._multi = None
def fetch_impl(self, request, callback): def fetch_impl(self, request, callback):
self._requests.append((request, callback)) self._requests.append((request, callback))
self._process_queue() self._process_queue()
@ -255,6 +260,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
queue=info["curl_start_time"] - info["request"].start_time, queue=info["curl_start_time"] - info["request"].start_time,
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME), namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME), connect=curl.getinfo(pycurl.CONNECT_TIME),
appconnect=curl.getinfo(pycurl.APPCONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME), pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME), starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME), total=curl.getinfo(pycurl.TOTAL_TIME),
@ -494,8 +500,10 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
def _curl_debug(self, debug_type, debug_msg): def _curl_debug(self, debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>') debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0: if debug_type == 0:
debug_msg = native_str(debug_msg)
curl_log.debug('%s', debug_msg.strip()) curl_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2): elif debug_type in (1, 2):
debug_msg = native_str(debug_msg)
for line in debug_msg.splitlines(): for line in debug_msg.splitlines():
curl_log.debug('%s %s', debug_types[debug_type], line) curl_log.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4: elif debug_type == 4:

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -274,7 +273,9 @@ def recursive_unicode(obj):
# This regex should avoid those problems. # This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting # Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes. # processed as escapes.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)""")) _URL_RE = re.compile(to_unicode(
r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)""" # noqa: E501
))
def linkify(text, shorten=False, extra_params="", def linkify(text, shorten=False, extra_params="",
@ -289,24 +290,24 @@ def linkify(text, shorten=False, extra_params="",
* ``shorten``: Long urls will be shortened for display. * ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable * ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``, e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or:: or::
def extra_params_cb(url): def extra_params_cb(url):
if url.startswith("http://example.com"): if url.startswith("http://example.com"):
return 'class="internal"' return 'class="internal"'
else: else:
return 'class="external" rel="nofollow"' return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb) linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If * ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified. this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be * ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp", linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as "mailto"])``. It is very unsafe to include protocols such as
``javascript``. ``javascript``.
""" """
if extra_params and not callable(extra_params): if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip() extra_params = " " + extra_params.strip()

View file

@ -1,6 +1,20 @@
"""``tornado.gen`` is a generator-based interface to make it easier to """``tornado.gen`` implements generator-based coroutines.
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator .. note::
The "decorator and generator" approach in this module is a
precursor to native coroutines (using ``async def`` and ``await``)
which were introduced in Python 3.5. Applications that do not
require compatibility with older versions of Python should use
native coroutines instead. Some parts of this module are still
useful with native coroutines, notably `multi`, `sleep`,
`WaitIterator`, and `with_timeout`. Some of these functions have
counterparts in the `asyncio` module which may be used as well,
although the two may not necessarily be 100% compatible.
Coroutines provide an easier way to work in an asynchronous
environment than chaining callbacks. Code using coroutines is
technically asynchronous, but it is written as a single generator
instead of a collection of separate functions. instead of a collection of separate functions.
For example, the following asynchronous handler: For example, the following asynchronous handler:
@ -37,7 +51,7 @@ could be written with ``gen`` as:
:hide: :hide:
Most asynchronous functions in Tornado return a `.Future`; Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`. yielding this object returns its ``Future.result``.
You can also yield a list or dict of ``Futures``, which will be You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will started at the same time and run in parallel; a list or dict of results will
@ -81,15 +95,15 @@ import functools
import itertools import itertools
import os import os
import sys import sys
import textwrap
import types import types
import weakref import warnings
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future from tornado.concurrent import (Future, is_future, chain_future, future_set_exc_info,
future_add_done_callback, future_set_result_unless_cancelled)
from tornado.ioloop import IOLoop from tornado.ioloop import IOLoop
from tornado.log import app_log from tornado.log import app_log
from tornado import stack_context from tornado import stack_context
from tornado.util import PY3, raise_exc_info from tornado.util import PY3, raise_exc_info, TimeoutError
try: try:
try: try:
@ -154,10 +168,6 @@ class ReturnValueIgnoredError(Exception):
pass pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def _value_from_stopiteration(e): def _value_from_stopiteration(e):
try: try:
# StopIteration has a value attribute beginning in py33. # StopIteration has a value attribute beginning in py33.
@ -173,6 +183,21 @@ def _value_from_stopiteration(e):
return None return None
def _create_future():
future = Future()
# Fixup asyncio debug info by removing extraneous stack entries
source_traceback = getattr(future, "_source_traceback", ())
while source_traceback:
# Each traceback entry is equivalent to a
# (filename, self.lineno, self.name, self.line) tuple
filename = source_traceback[-1][0]
if filename == __file__:
del source_traceback[-1]
else:
break
return future
def engine(func): def engine(func):
"""Callback-oriented decorator for asynchronous generators. """Callback-oriented decorator for asynchronous generators.
@ -189,7 +214,14 @@ def engine(func):
they are finished. One notable exception is the they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`, `~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument. which use ``self.finish()`` in place of a callback argument.
.. deprecated:: 5.1
This decorator will be removed in 6.0. Use `coroutine` or
``async def`` instead.
""" """
warnings.warn("gen.engine is deprecated, use gen.coroutine or async def instead",
DeprecationWarning)
func = _make_coroutine_wrapper(func, replace_callback=False) func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func) @functools.wraps(func)
@ -204,11 +236,11 @@ def engine(func):
# The engine interface doesn't give us any way to return # The engine interface doesn't give us any way to return
# errors but to raise them into the stack context. # errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved. # Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback)) future_add_done_callback(future, stack_context.wrap(final_callback))
return wrapper return wrapper
def coroutine(func, replace_callback=True): def coroutine(func):
"""Decorator for asynchronous generators. """Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped Any generator that yields objects from this module must be wrapped
@ -229,9 +261,6 @@ def coroutine(func, replace_callback=True):
``callback`` argument is not visible inside the decorated ``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself. function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning:: .. warning::
When exceptions occur inside a coroutine, the exception When exceptions occur inside a coroutine, the exception
@ -242,30 +271,14 @@ def coroutine(func, replace_callback=True):
`.IOLoop.run_sync` for top-level calls, or passing the `.Future` `.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`. to `.IOLoop.add_future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
return _make_coroutine_wrapper(func, replace_callback=True) return _make_coroutine_wrapper(func, replace_callback=True)
# Ties lifetime of runners to their result futures. Github Issue #1769
# Generators, like any object in Python, must be strong referenced
# in order to not be cleaned up by the garbage collector. When using
# coroutines, the Runner object is what strong-refs the inner
# generator. However, the only item that strong-reffed the Runner
# was the last Future that the inner generator yielded (via the
# Future's internal done_callback list). Usually this is enough, but
# it is also possible for this Future to not have any strong references
# other than other objects referenced by the Runner object (usually
# when using other callback patterns and/or weakrefs). In this
# situation, if a garbage collection ran, a cycle would be detected and
# Runner objects could be destroyed along with their inner generators
# and everything in their local scope.
# This map provides strong references to Runner objects as long as
# their result future objects also have strong references (typically
# from the parent coroutine's Runner). This keeps the coroutine's
# Runner alive.
_futures_to_runners = weakref.WeakKeyDictionary()
def _make_coroutine_wrapper(func, replace_callback): def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``. """The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
@ -281,9 +294,11 @@ def _make_coroutine_wrapper(func, replace_callback):
@functools.wraps(wrapped) @functools.wraps(wrapped)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
future = TracebackFuture() future = _create_future()
if replace_callback and 'callback' in kwargs: if replace_callback and 'callback' in kwargs:
warnings.warn("callback arguments are deprecated, use the returned Future instead",
DeprecationWarning, stacklevel=2)
callback = kwargs.pop('callback') callback = kwargs.pop('callback')
IOLoop.current().add_future( IOLoop.current().add_future(
future, lambda future: callback(future.result())) future, lambda future: callback(future.result()))
@ -293,8 +308,12 @@ def _make_coroutine_wrapper(func, replace_callback):
except (Return, StopIteration) as e: except (Return, StopIteration) as e:
result = _value_from_stopiteration(e) result = _value_from_stopiteration(e)
except Exception: except Exception:
future.set_exc_info(sys.exc_info()) future_set_exc_info(future, sys.exc_info())
return future try:
return future
finally:
# Avoid circular references
future = None
else: else:
if isinstance(result, GeneratorType): if isinstance(result, GeneratorType):
# Inline the first iteration of Runner.run. This lets us # Inline the first iteration of Runner.run. This lets us
@ -306,17 +325,26 @@ def _make_coroutine_wrapper(func, replace_callback):
orig_stack_contexts = stack_context._state.contexts orig_stack_contexts = stack_context._state.contexts
yielded = next(result) yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts: if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture() yielded = _create_future()
yielded.set_exception( yielded.set_exception(
stack_context.StackContextInconsistentError( stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused ' 'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)')) 'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e: except (StopIteration, Return) as e:
future.set_result(_value_from_stopiteration(e)) future_set_result_unless_cancelled(future, _value_from_stopiteration(e))
except Exception: except Exception:
future.set_exc_info(sys.exc_info()) future_set_exc_info(future, sys.exc_info())
else: else:
_futures_to_runners[future] = Runner(result, future, yielded) # Provide strong references to Runner objects as long
# as their result future objects also have strong
# references (typically from the parent coroutine's
# Runner). This keeps the coroutine's Runner alive.
# We do this by exploiting the public API
# add_done_callback() instead of putting a private
# attribute on the Future.
# (Github issues #1769, #2229).
runner = Runner(result, future, yielded)
future.add_done_callback(lambda _: runner)
yielded = None yielded = None
try: try:
return future return future
@ -330,7 +358,7 @@ def _make_coroutine_wrapper(func, replace_callback):
# used in the absence of cycles). We can avoid the # used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it. # cycle by clearing the local variable after we return it.
future = None future = None
future.set_result(result) future_set_result_unless_cancelled(future, result)
return future return future
wrapper.__wrapped__ = wrapped wrapper.__wrapped__ = wrapped
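Aside: the `future = None` inside the `finally` clause is safe because a `return` expression is evaluated before `finally` runs; rebinding the local afterwards only drops the frame's reference (breaking the frame-to-future cycle) and cannot change what the caller receives. A standalone illustration:

    def demo():
        obj = object()
        try:
            return obj   # the return value is captured here ...
        finally:
            obj = None   # ... so this merely clears the local name

    assert demo() is not None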
@ -444,7 +472,7 @@ class WaitIterator(object):
self._running_future = None self._running_future = None
for future in futures: for future in futures:
future.add_done_callback(self._done_callback) future_add_done_callback(future, self._done_callback)
def done(self): def done(self):
"""Returns True if this iterator has no more results.""" """Returns True if this iterator has no more results."""
@ -460,7 +488,7 @@ class WaitIterator(object):
Note that this `.Future` will not be the same object as any of Note that this `.Future` will not be the same object as any of
the inputs. the inputs.
""" """
self._running_future = TracebackFuture() self._running_future = Future()
if self._finished: if self._finished:
self._return_result(self._finished.popleft()) self._return_result(self._finished.popleft())
@ -482,9 +510,8 @@ class WaitIterator(object):
self.current_future = done self.current_future = done
self.current_index = self._unfinished.pop(done) self.current_index = self._unfinished.pop(done)
@coroutine
def __aiter__(self): def __aiter__(self):
raise Return(self) return self
def __anext__(self): def __anext__(self):
if self.done(): if self.done():
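`__aiter__` now returns `self` directly instead of a coroutine, matching the native async-iterator protocol finalized in Python 3.5.2. Calling code is unaffected; a sketch of the yield-based usage, assuming ``futures`` is a list of `.Future` objects:

    from tornado import gen

    @gen.coroutine
    def consume(futures):
        wait_iterator = gen.WaitIterator(*futures)
        while not wait_iterator.done():
            result = yield wait_iterator.next()
            # current_index identifies which input produced this result
            print("input %r finished with %r" %
                  (wait_iterator.current_index, result))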
@ -497,8 +524,13 @@ class YieldPoint(object):
"""Base class for objects that may be yielded from the generator. """Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0 .. deprecated:: 4.0
Use `Futures <.Future>` instead. Use `Futures <.Future>` instead. This class and all its subclasses
will be removed in 6.0
""" """
def __init__(self):
warnings.warn("YieldPoint is deprecated, use Futures instead",
DeprecationWarning)
def start(self, runner): def start(self, runner):
"""Called by the runner after the generator has yielded. """Called by the runner after the generator has yielded.
@ -535,9 +567,11 @@ class Callback(YieldPoint):
is given it will be returned by `Wait`. is given it will be returned by `Wait`.
.. deprecated:: 4.0 .. deprecated:: 4.0
Use `Futures <.Future>` instead. Use `Futures <.Future>` instead. This class will be removed in 6.0.
""" """
def __init__(self, key): def __init__(self, key):
warnings.warn("gen.Callback is deprecated, use Futures instead",
DeprecationWarning)
self.key = key self.key = key
def start(self, runner): def start(self, runner):
@ -555,9 +589,11 @@ class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`. """Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0 .. deprecated:: 4.0
Use `Futures <.Future>` instead. Use `Futures <.Future>` instead. This class will be removed in 6.0.
""" """
def __init__(self, key): def __init__(self, key):
warnings.warn("gen.Wait is deprecated, use Futures instead",
DeprecationWarning)
self.key = key self.key = key
def start(self, runner): def start(self, runner):
@ -579,9 +615,11 @@ class WaitAll(YieldPoint):
`WaitAll` is equivalent to yielding a list of `Wait` objects. `WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0 .. deprecated:: 4.0
Use `Futures <.Future>` instead. Use `Futures <.Future>` instead. This class will be removed in 6.0.
""" """
def __init__(self, keys): def __init__(self, keys):
warnings.warn("gen.WaitAll is deprecated, use gen.multi instead",
DeprecationWarning)
self.keys = keys self.keys = keys
def start(self, runner): def start(self, runner):
@ -605,33 +643,43 @@ def Task(func, *args, **kwargs):
``gen.Task`` is now a function that returns a `.Future`, instead of ``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when a subclass of `YieldPoint`. It still behaves the same way when
yielded. yielded.
.. deprecated:: 5.1
This function is deprecated and will be removed in 6.0.
""" """
future = Future() warnings.warn("gen.Task is deprecated, use Futures instead",
DeprecationWarning)
future = _create_future()
def handle_exception(typ, value, tb): def handle_exception(typ, value, tb):
if future.done(): if future.done():
return False return False
future.set_exc_info((typ, value, tb)) future_set_exc_info(future, (typ, value, tb))
return True return True
def set_result(result): def set_result(result):
if future.done(): if future.done():
return return
future.set_result(result) future_set_result_unless_cancelled(future, result)
with stack_context.ExceptionStackContext(handle_exception): with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs) func(*args, callback=_argument_adapter(set_result), **kwargs)
return future return future
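For callers, `gen.Task` bridges a callback-accepting function to a `.Future`; migrating code should call Future-returning equivalents directly. A sketch of the now-deprecated pattern (``fetch_data`` is a hypothetical function that takes a ``callback`` keyword argument):

    from tornado import gen

    @gen.coroutine
    def caller():
        # roughly equivalent to fetch_data("key", callback=<resume coroutine>);
        # fetch_data is hypothetical
        result = yield gen.Task(fetch_data, "key")
        raise gen.Return(result)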
class YieldFuture(YieldPoint): class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None): def __init__(self, future):
"""Adapts a `.Future` to the `YieldPoint` interface. """Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. deprecated:: 5.1
This class will be removed in 6.0.
""" """
warnings.warn("YieldFuture is deprecated, use Futures instead",
DeprecationWarning)
self.future = future self.future = future
self.io_loop = io_loop or IOLoop.current() self.io_loop = IOLoop.current()
def start(self, runner): def start(self, runner):
if not self.future.done(): if not self.future.done():
@ -704,6 +752,10 @@ def multi(children, quiet_exceptions=()):
This function is available under the names ``multi()`` and ``Multi()`` This function is available under the names ``multi()`` and ``Multi()``
for historical reasons. for historical reasons.
Cancelling a `.Future` returned by ``multi()`` does not cancel its
children. `asyncio.gather` is similar to ``multi()``, but it does
cancel its children.
.. versionchanged:: 4.2 .. versionchanged:: 4.2
If multiple yieldables fail, any exceptions after the first If multiple yieldables fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions`` (which is raised) will be logged. Added the ``quiet_exceptions``
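Usage of `multi` is unchanged: it takes a list or dict of yieldables and resolves them in parallel. A sketch, assuming ``client`` is an `.AsyncHTTPClient` and the URLs are placeholders:

    from tornado import gen

    @gen.coroutine
    def parallel_fetch(client):
        # list form: results come back as a list, in input order
        resp1, resp2 = yield gen.multi([client.fetch("url1"),
                                        client.fetch("url2")])
        # dict form: results come back keyed like the input dict
        responses = yield gen.multi({"a": client.fetch("url1"),
                                     "b": client.fetch("url2")})
        raise gen.Return(responses)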
@ -741,9 +793,11 @@ class MultiYieldPoint(YieldPoint):
remains as an alias for the equivalent `multi` function. remains as an alias for the equivalent `multi` function.
.. deprecated:: 4.3 .. deprecated:: 4.3
Use `multi` instead. Use `multi` instead. This class will be removed in 6.0.
""" """
def __init__(self, children, quiet_exceptions=()): def __init__(self, children, quiet_exceptions=()):
warnings.warn("MultiYieldPoint is deprecated, use Futures instead",
DeprecationWarning)
self.keys = None self.keys = None
if isinstance(children, dict): if isinstance(children, dict):
self.keys = list(children.keys()) self.keys = list(children.keys())
@ -812,12 +866,13 @@ def multi_future(children, quiet_exceptions=()):
else: else:
keys = None keys = None
children = list(map(convert_yielded, children)) children = list(map(convert_yielded, children))
assert all(is_future(i) for i in children) assert all(is_future(i) or isinstance(i, _NullFuture) for i in children)
unfinished_children = set(children) unfinished_children = set(children)
future = Future() future = _create_future()
if not children: if not children:
future.set_result({} if keys is not None else []) future_set_result_unless_cancelled(future,
{} if keys is not None else [])
def callback(f): def callback(f):
unfinished_children.remove(f) unfinished_children.remove(f)
@ -832,18 +887,19 @@ def multi_future(children, quiet_exceptions=()):
app_log.error("Multiple exceptions in yield list", app_log.error("Multiple exceptions in yield list",
exc_info=True) exc_info=True)
else: else:
future.set_exc_info(sys.exc_info()) future_set_exc_info(future, sys.exc_info())
if not future.done(): if not future.done():
if keys is not None: if keys is not None:
future.set_result(dict(zip(keys, result_list))) future_set_result_unless_cancelled(future,
dict(zip(keys, result_list)))
else: else:
future.set_result(result_list) future_set_result_unless_cancelled(future, result_list)
listening = set() listening = set()
for f in children: for f in children:
if f not in listening: if f not in listening:
listening.add(f) listening.add(f)
f.add_done_callback(callback) future_add_done_callback(f, callback)
return future return future
@ -863,18 +919,18 @@ def maybe_future(x):
if is_future(x): if is_future(x):
return x return x
else: else:
fut = Future() fut = _create_future()
fut.set_result(x) fut.set_result(x)
return fut return fut
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()): def with_timeout(timeout, future, quiet_exceptions=()):
"""Wraps a `.Future` (or other yieldable object) in a timeout. """Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `TimeoutError` if the input future does not complete before Raises `tornado.util.TimeoutError` if the input future does not
``timeout``, which may be specified in any form allowed by complete before ``timeout``, which may be specified in any form
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
relative to `.IOLoop.time`) an absolute time relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions`` will be logged unless it is of a type contained in ``quiet_exceptions``
@ -882,6 +938,10 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
Does not support `YieldPoint` subclasses. Does not support `YieldPoint` subclasses.
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0 .. versionadded:: 4.0
.. versionchanged:: 4.1 .. versionchanged:: 4.1
@ -890,6 +950,7 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
.. versionchanged:: 4.4 .. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`. Added support for yieldable objects other than `.Future`.
""" """
# TODO: allow YieldPoints in addition to other yieldables? # TODO: allow YieldPoints in addition to other yieldables?
# Tricky to do with stack_context semantics. # Tricky to do with stack_context semantics.
@ -900,10 +961,9 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
# callers and B) concurrent futures can only be cancelled while they are # callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time. # in the queue, so cancellation cannot reliably bound our waiting time.
future = convert_yielded(future) future = convert_yielded(future)
result = Future() result = _create_future()
chain_future(future, result) chain_future(future, result)
if io_loop is None: io_loop = IOLoop.current()
io_loop = IOLoop.current()
def error_callback(future): def error_callback(future):
try: try:
@ -914,17 +974,18 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
future, exc_info=True) future, exc_info=True)
def timeout_callback(): def timeout_callback():
result.set_exception(TimeoutError("Timeout")) if not result.done():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it. # In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback) future_add_done_callback(future, error_callback)
timeout_handle = io_loop.add_timeout( timeout_handle = io_loop.add_timeout(
timeout, timeout_callback) timeout, timeout_callback)
if isinstance(future, Future): if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't # We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also # need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here. # don't care about StackContext here.
future.add_done_callback( future_add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle)) future, lambda future: io_loop.remove_timeout(timeout_handle))
else: else:
# concurrent.futures.Futures may resolve on any thread, so we # concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop. # need to route them back to the IOLoop.
@ -947,15 +1008,31 @@ def sleep(duration):
.. versionadded:: 4.1 .. versionadded:: 4.1
""" """
f = Future() f = _create_future()
IOLoop.current().call_later(duration, lambda: f.set_result(None)) IOLoop.current().call_later(duration,
lambda: future_set_result_unless_cancelled(f, None))
return f return f
_null_future = Future() class _NullFuture(object):
_null_future.set_result(None) """_NullFuture resembles a Future that finished with a result of None.
moment = Future() It's not actually a `Future` to avoid depending on a particular event loop.
Handled as a special case in the coroutine runner.
"""
def result(self):
return None
def done(self):
return True
# _null_future is used as a dummy value in the coroutine runner. It differs
# from moment in that moment always adds a delay of one IOLoop iteration
# while _null_future is processed as soon as possible.
_null_future = _NullFuture()
moment = _NullFuture()
moment.__doc__ = \ moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for """A special object which may be yielded to allow the IOLoop to run for
one iteration. one iteration.
@ -968,9 +1045,9 @@ Usage: ``yield gen.moment``
.. versionadded:: 4.0 .. versionadded:: 4.0
.. deprecated:: 4.5 .. deprecated:: 4.5
``yield None`` is now equivalent to ``yield gen.moment``. ``yield None`` (or ``yield`` with no argument) is now equivalent to
``yield gen.moment``.
""" """
moment.set_result(None)
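A minimal sketch of yielding `gen.moment` so a long-running coroutine does not starve the loop (``handle`` is a hypothetical synchronous step):

    from tornado import gen

    @gen.coroutine
    def process_all(items):
        for item in items:
            handle(item)      # hypothetical CPU-bound work on one item
            yield gen.moment  # give the IOLoop one iteration in between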
class Runner(object): class Runner(object):
@ -979,7 +1056,7 @@ class Runner(object):
Maintains information about pending callbacks and their results. Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`) `.Future`)
""" """
def __init__(self, gen, result_future, first_yielded): def __init__(self, gen, result_future, first_yielded):
self.gen = gen self.gen = gen
@ -1023,9 +1100,10 @@ class Runner(object):
self.results[key] = result self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready(): if self.yield_point is not None and self.yield_point.is_ready():
try: try:
self.future.set_result(self.yield_point.get_result()) future_set_result_unless_cancelled(self.future,
self.yield_point.get_result())
except: except:
self.future.set_exc_info(sys.exc_info()) future_set_exc_info(self.future, sys.exc_info())
self.yield_point = None self.yield_point = None
self.run() self.run()
@ -1084,14 +1162,15 @@ class Runner(object):
raise LeakedCallbackError( raise LeakedCallbackError(
"finished without waiting for callbacks %r" % "finished without waiting for callbacks %r" %
self.pending_callbacks) self.pending_callbacks)
self.result_future.set_result(_value_from_stopiteration(e)) future_set_result_unless_cancelled(self.result_future,
_value_from_stopiteration(e))
self.result_future = None self.result_future = None
self._deactivate_stack_context() self._deactivate_stack_context()
return return
except Exception: except Exception:
self.finished = True self.finished = True
self.future = _null_future self.future = _null_future
self.result_future.set_exc_info(sys.exc_info()) future_set_exc_info(self.result_future, sys.exc_info())
self.result_future = None self.result_future = None
self._deactivate_stack_context() self._deactivate_stack_context()
return return
@ -1110,19 +1189,18 @@ class Runner(object):
if isinstance(yielded, YieldPoint): if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go # YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism. # through the generic convert_yielded mechanism.
self.future = TracebackFuture() self.future = Future()
def start_yield_point(): def start_yield_point():
try: try:
yielded.start(self) yielded.start(self)
if yielded.is_ready(): if yielded.is_ready():
self.future.set_result( future_set_result_unless_cancelled(self.future, yielded.get_result())
yielded.get_result())
else: else:
self.yield_point = yielded self.yield_point = yielded
except Exception: except Exception:
self.future = TracebackFuture() self.future = Future()
self.future.set_exc_info(sys.exc_info()) future_set_exc_info(self.future, sys.exc_info())
if self.stack_context_deactivate is None: if self.stack_context_deactivate is None:
# Start a stack context if this is the first # Start a stack context if this is the first
@ -1142,13 +1220,16 @@ class Runner(object):
try: try:
self.future = convert_yielded(yielded) self.future = convert_yielded(yielded)
except BadYieldError: except BadYieldError:
self.future = TracebackFuture() self.future = Future()
self.future.set_exc_info(sys.exc_info()) future_set_exc_info(self.future, sys.exc_info())
if not self.future.done() or self.future is moment: if self.future is moment:
self.io_loop.add_callback(self.run)
return False
elif not self.future.done():
def inner(f): def inner(f):
# Break a reference cycle to speed GC. # Break a reference cycle to speed GC.
f = None # noqa f = None # noqa
self.run() self.run()
self.io_loop.add_future( self.io_loop.add_future(
self.future, inner) self.future, inner)
@ -1161,8 +1242,8 @@ class Runner(object):
def handle_exception(self, typ, value, tb): def handle_exception(self, typ, value, tb):
if not self.running and not self.finished: if not self.running and not self.finished:
self.future = TracebackFuture() self.future = Future()
self.future.set_exc_info((typ, value, tb)) future_set_exc_info(self.future, (typ, value, tb))
self.run() self.run()
return True return True
else: else:
@ -1194,20 +1275,10 @@ def _argument_adapter(callback):
return wrapper return wrapper
# Convert Awaitables into Futures. It is unfortunately possible # Convert Awaitables into Futures.
# to have infinite recursion here if those Awaitables assume that try:
# we're using a different coroutine runner and yield objects import asyncio
# we don't understand. If that happens, the solution is to except ImportError:
# register that runner's yieldable objects with convert_yielded.
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
@coroutine
def _wrap_awaitable(x):
if hasattr(x, '__await__'):
x = x.__await__()
return (yield from x)
"""))
else:
# Py2-compatible version for use with Cython. # Py2-compatible version for use with Cython.
# Copied from PEP 380. # Copied from PEP 380.
@coroutine @coroutine
@ -1254,6 +1325,13 @@ else:
_r = _value_from_stopiteration(_e) _r = _value_from_stopiteration(_e)
break break
raise Return(_r) raise Return(_r)
else:
try:
_wrap_awaitable = asyncio.ensure_future
except AttributeError:
# asyncio.ensure_future was introduced in Python 3.4.4, but
# Debian jessie still ships with 3.4.2 so try the old name.
_wrap_awaitable = getattr(asyncio, 'async')
def convert_yielded(yielded): def convert_yielded(yielded):
@ -1271,8 +1349,10 @@ def convert_yielded(yielded):
.. versionadded:: 4.1 .. versionadded:: 4.1
""" """
# Lists and dicts containing YieldPoints were handled earlier. # Lists and dicts containing YieldPoints were handled earlier.
if yielded is None: if yielded is None or yielded is moment:
return moment return moment
elif yielded is _null_future:
return _null_future
elif isinstance(yielded, (list, dict)): elif isinstance(yielded, (list, dict)):
return multi(yielded) return multi(yielded)
elif is_future(yielded): elif is_future(yielded):
@ -1285,19 +1365,3 @@ def convert_yielded(yielded):
if singledispatch is not None: if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded) convert_yielded = singledispatch(convert_yielded)
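Because `convert_yielded` is wrapped in `functools.singledispatch` (when available), third-party yieldables can register themselves with it; this is the same hook `tornado.platform.asyncio` uses for `asyncio.Future`. A hedged sketch with a made-up wrapper type:

    from tornado import gen

    class FutureHolder(object):
        """Hypothetical object wrapping a Tornado Future."""
        def __init__(self, future):
            self.future = future

    @gen.convert_yielded.register(FutureHolder)
    def _convert_future_holder(holder):
        # after this, ``yield FutureHolder(f)`` works inside coroutines
        return holder.future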
try:
# If we can import t.p.asyncio, do it for its side effect
# (registering asyncio.Future with convert_yielded).
# It's ugly to do this here, but it prevents a cryptic
# infinite recursion in _wrap_awaitable.
# Note that even with this, asyncio integration is unlikely
# to work unless the application also configures AsyncIOLoop,
# but at least the error messages in that case are more
# comprehensible than a stack overflow.
import tornado.platform.asyncio
except ImportError:
pass
else:
# Reference the imported module to make pyflakes happy.
tornado

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2014 Facebook # Copyright 2014 Facebook
# #
@ -23,7 +22,8 @@ from __future__ import absolute_import, division, print_function
import re import re
from tornado.concurrent import Future from tornado.concurrent import (Future, future_add_done_callback,
future_set_result_unless_cancelled)
from tornado.escape import native_str, utf8 from tornado.escape import native_str, utf8
from tornado import gen from tornado import gen
from tornado import httputil from tornado import httputil
@ -164,7 +164,6 @@ class HTTP1Connection(httputil.HTTPConnection):
header_data = yield gen.with_timeout( header_data = yield gen.with_timeout(
self.stream.io_loop.time() + self.params.header_timeout, self.stream.io_loop.time() + self.params.header_timeout,
header_future, header_future,
io_loop=self.stream.io_loop,
quiet_exceptions=iostream.StreamClosedError) quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError: except gen.TimeoutError:
self.close() self.close()
@ -224,7 +223,7 @@ class HTTP1Connection(httputil.HTTPConnection):
try: try:
yield gen.with_timeout( yield gen.with_timeout(
self.stream.io_loop.time() + self._body_timeout, self.stream.io_loop.time() + self._body_timeout,
body_future, self.stream.io_loop, body_future,
quiet_exceptions=iostream.StreamClosedError) quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError: except gen.TimeoutError:
gen_log.info("Timeout reading body from %s", gen_log.info("Timeout reading body from %s",
@ -251,6 +250,8 @@ class HTTP1Connection(httputil.HTTPConnection):
except httputil.HTTPInputError as e: except httputil.HTTPInputError as e:
gen_log.info("Malformed HTTP message from %s: %s", gen_log.info("Malformed HTTP message from %s: %s",
self.context, e) self.context, e)
if not self.is_client:
yield self.stream.write(b'HTTP/1.1 400 Bad Request\r\n\r\n')
self.close() self.close()
raise gen.Return(False) raise gen.Return(False)
finally: finally:
@ -290,7 +291,7 @@ class HTTP1Connection(httputil.HTTPConnection):
self._close_callback = None self._close_callback = None
callback() callback()
if not self._finish_future.done(): if not self._finish_future.done():
self._finish_future.set_result(None) future_set_result_unless_cancelled(self._finish_future, None)
self._clear_callbacks() self._clear_callbacks()
def close(self): def close(self):
@ -298,7 +299,7 @@ class HTTP1Connection(httputil.HTTPConnection):
self.stream.close() self.stream.close()
self._clear_callbacks() self._clear_callbacks()
if not self._finish_future.done(): if not self._finish_future.done():
self._finish_future.set_result(None) future_set_result_unless_cancelled(self._finish_future, None)
def detach(self): def detach(self):
"""Take control of the underlying stream. """Take control of the underlying stream.
@ -312,7 +313,7 @@ class HTTP1Connection(httputil.HTTPConnection):
stream = self.stream stream = self.stream
self.stream = None self.stream = None
if not self._finish_future.done(): if not self._finish_future.done():
self._finish_future.set_result(None) future_set_result_unless_cancelled(self._finish_future, None)
return stream return stream
def set_body_timeout(self, timeout): def set_body_timeout(self, timeout):
@ -349,19 +350,22 @@ class HTTP1Connection(httputil.HTTPConnection):
# self._request_start_line.version or # self._request_start_line.version or
# start_line.version? # start_line.version?
self._request_start_line.version == 'HTTP/1.1' and self._request_start_line.version == 'HTTP/1.1' and
# 304 responses have no body (not even a zero-length body), and so # 1xx, 204 and 304 responses have no body (not even a zero-length
# should not have either Content-Length or Transfer-Encoding. # body), and so should not have either Content-Length or
# headers. # Transfer-Encoding headers.
start_line.code not in (204, 304) and start_line.code not in (204, 304) and
(start_line.code < 100 or start_line.code >= 200) and
# No need to chunk the output if a Content-Length is specified. # No need to chunk the output if a Content-Length is specified.
'Content-Length' not in headers and 'Content-Length' not in headers and
# Applications are discouraged from touching Transfer-Encoding, # Applications are discouraged from touching Transfer-Encoding,
# but if they do, leave it alone. # but if they do, leave it alone.
'Transfer-Encoding' not in headers) 'Transfer-Encoding' not in headers)
# If the connection to an HTTP/1.1 client will be closed, inform the client
if (self._request_start_line.version == 'HTTP/1.1' and self._disconnect_on_finish):
headers['Connection'] = 'close'
# If a 1.0 client asked for keep-alive, add the header. # If a 1.0 client asked for keep-alive, add the header.
if (self._request_start_line.version == 'HTTP/1.0' and if (self._request_start_line.version == 'HTTP/1.0' and
(self._request_headers.get('Connection', '').lower() == self._request_headers.get('Connection', '').lower() == 'keep-alive'):
'keep-alive')):
headers['Connection'] = 'Keep-Alive' headers['Connection'] = 'Keep-Alive'
if self._chunking_output: if self._chunking_output:
headers['Transfer-Encoding'] = 'chunked' headers['Transfer-Encoding'] = 'chunked'
@ -419,7 +423,7 @@ class HTTP1Connection(httputil.HTTPConnection):
def write(self, chunk, callback=None): def write(self, chunk, callback=None):
"""Implements `.HTTPConnection.write`. """Implements `.HTTPConnection.write`.
For backwards compatibility is is allowed but deprecated to For backwards compatibility it is allowed but deprecated to
skip `write_headers` and instead call `write()` with a skip `write_headers` and instead call `write()` with a
pre-encoded header block. pre-encoded header block.
""" """
@ -464,7 +468,7 @@ class HTTP1Connection(httputil.HTTPConnection):
if self._pending_write is None: if self._pending_write is None:
self._finish_request(None) self._finish_request(None)
else: else:
self._pending_write.add_done_callback(self._finish_request) future_add_done_callback(self._pending_write, self._finish_request)
def _on_write_complete(self, future): def _on_write_complete(self, future):
exc = future.exception() exc = future.exception()
@ -477,7 +481,7 @@ class HTTP1Connection(httputil.HTTPConnection):
if self._write_future is not None: if self._write_future is not None:
future = self._write_future future = self._write_future
self._write_future = None self._write_future = None
future.set_result(None) future_set_result_unless_cancelled(future, None)
def _can_keep_alive(self, start_line, headers): def _can_keep_alive(self, start_line, headers):
if self.params.no_keep_alive: if self.params.no_keep_alive:
@ -504,7 +508,7 @@ class HTTP1Connection(httputil.HTTPConnection):
# default state for the next request. # default state for the next request.
self.stream.set_nodelay(False) self.stream.set_nodelay(False)
if not self._finish_future.done(): if not self._finish_future.done():
self._finish_future.set_result(None) future_set_result_unless_cancelled(self._finish_future, None)
def _parse_headers(self, data): def _parse_headers(self, data):
# The lstrip removes newlines that some implementations sometimes # The lstrip removes newlines that some implementations sometimes
@ -515,12 +519,7 @@ class HTTP1Connection(httputil.HTTPConnection):
# RFC 7230 section allows for both CRLF and bare LF. # RFC 7230 section allows for both CRLF and bare LF.
eol = data.find("\n") eol = data.find("\n")
start_line = data[:eol].rstrip("\r") start_line = data[:eol].rstrip("\r")
try: headers = httputil.HTTPHeaders.parse(data[eol:])
headers = httputil.HTTPHeaders.parse(data[eol:])
except ValueError:
# probably from split() if there was no ':' in the line
raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
data[eol:100])
return start_line, headers return start_line, headers
def _read_body(self, code, headers, delegate): def _read_body(self, code, headers, delegate):
@ -592,6 +591,9 @@ class HTTP1Connection(httputil.HTTPConnection):
chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64) chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
chunk_len = int(chunk_len.strip(), 16) chunk_len = int(chunk_len.strip(), 16)
if chunk_len == 0: if chunk_len == 0:
crlf = yield self.stream.read_bytes(2)
if crlf != b'\r\n':
raise httputil.HTTPInputError("improperly terminated chunked request")
return return
total_size += chunk_len total_size += chunk_len
if total_size > self._max_body_size: if total_size > self._max_body_size:
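For reference, the extra two-byte read enforces the CRLF that RFC 7230 requires after the terminating zero-length chunk (bodies carrying trailers are rejected by this check). A well-formed chunked body as a Python byte string:

    body = b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
    # the chunks decode to b"Wikipedia"; the final b"\r\n" after the
    # zero-sized chunk is what the new read_bytes(2) verifies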

View file

@ -44,9 +44,9 @@ import functools
import time import time
import weakref import weakref
from tornado.concurrent import TracebackFuture from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str from tornado.escape import utf8, native_str
from tornado import httputil, stack_context from tornado import gen, httputil, stack_context
from tornado.ioloop import IOLoop from tornado.ioloop import IOLoop
from tornado.util import Configurable from tornado.util import Configurable
@ -54,8 +54,10 @@ from tornado.util import Configurable
class HTTPClient(object): class HTTPClient(object):
"""A blocking HTTP client. """A blocking HTTP client.
This interface is provided for convenience and testing; most applications This interface is provided to make it easier to share code between
that are running an IOLoop will want to use `AsyncHTTPClient` instead. synchronous and asynchronous applications. Applications that are
running an `.IOLoop` must use `AsyncHTTPClient` instead.
Typical usage looks like this:: Typical usage looks like this::
http_client = httpclient.HTTPClient() http_client = httpclient.HTTPClient()
@ -70,12 +72,26 @@ class HTTPClient(object):
# Other errors are possible, such as IOError. # Other errors are possible, such as IOError.
print("Error: " + str(e)) print("Error: " + str(e))
http_client.close() http_client.close()
.. versionchanged:: 5.0
Due to limitations in `asyncio`, it is no longer possible to
use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
Use `AsyncHTTPClient` instead.
""" """
def __init__(self, async_client_class=None, **kwargs): def __init__(self, async_client_class=None, **kwargs):
# Initialize self._closed at the beginning of the constructor
# so that an exception raised here doesn't lead to confusing
# failures in __del__.
self._closed = True
self._io_loop = IOLoop(make_current=False) self._io_loop = IOLoop(make_current=False)
if async_client_class is None: if async_client_class is None:
async_client_class = AsyncHTTPClient async_client_class = AsyncHTTPClient
self._async_client = async_client_class(self._io_loop, **kwargs) # Create the client while our IOLoop is "current", without
# clobbering the thread's real current IOLoop (if any).
self._async_client = self._io_loop.run_sync(
gen.coroutine(lambda: async_client_class(**kwargs)))
self._closed = False self._closed = False
def __del__(self): def __del__(self):
@ -120,12 +136,12 @@ class AsyncHTTPClient(Configurable):
The constructor for this class is magic in several respects: It The constructor for this class is magic in several respects: It
actually creates an instance of an implementation-specific actually creates an instance of an implementation-specific
subclass, and instances are reused as a kind of pseudo-singleton subclass, and instances are reused as a kind of pseudo-singleton
(one per `.IOLoop`). The keyword argument ``force_instance=True`` (one per `.IOLoop`). The keyword argument ``force_instance=True``
can be used to suppress this singleton behavior. Unless can be used to suppress this singleton behavior. Unless
``force_instance=True`` is used, no arguments other than ``force_instance=True`` is used, no arguments should be passed to
``io_loop`` should be passed to the `AsyncHTTPClient` constructor. the `AsyncHTTPClient` constructor. The implementation subclass as
The implementation subclass as well as arguments to its well as arguments to its constructor can be set with the static
constructor can be set with the static method `configure()` method `configure()`
All `AsyncHTTPClient` implementations support a ``defaults`` All `AsyncHTTPClient` implementations support a ``defaults``
keyword argument, which can be used to set default values for keyword argument, which can be used to set default values for
@ -137,8 +153,9 @@ class AsyncHTTPClient(Configurable):
client = AsyncHTTPClient(force_instance=True, client = AsyncHTTPClient(force_instance=True,
defaults=dict(user_agent="MyUserAgent")) defaults=dict(user_agent="MyUserAgent"))
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
@classmethod @classmethod
def configurable_base(cls): def configurable_base(cls):
@ -156,16 +173,15 @@ class AsyncHTTPClient(Configurable):
setattr(cls, attr_name, weakref.WeakKeyDictionary()) setattr(cls, attr_name, weakref.WeakKeyDictionary())
return getattr(cls, attr_name) return getattr(cls, attr_name)
def __new__(cls, io_loop=None, force_instance=False, **kwargs): def __new__(cls, force_instance=False, **kwargs):
io_loop = io_loop or IOLoop.current() io_loop = IOLoop.current()
if force_instance: if force_instance:
instance_cache = None instance_cache = None
else: else:
instance_cache = cls._async_clients() instance_cache = cls._async_clients()
if instance_cache is not None and io_loop in instance_cache: if instance_cache is not None and io_loop in instance_cache:
return instance_cache[io_loop] return instance_cache[io_loop]
instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop, instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs)
**kwargs)
# Make sure the instance knows which cache to remove itself from. # Make sure the instance knows which cache to remove itself from.
# It can't simply call _async_clients() because we may be in # It can't simply call _async_clients() because we may be in
# __new__(AsyncHTTPClient) but instance.__class__ may be # __new__(AsyncHTTPClient) but instance.__class__ may be
@ -175,8 +191,8 @@ class AsyncHTTPClient(Configurable):
instance_cache[instance.io_loop] = instance instance_cache[instance.io_loop] = instance
return instance return instance
def initialize(self, io_loop, defaults=None): def initialize(self, defaults=None):
self.io_loop = io_loop self.io_loop = IOLoop.current()
self.defaults = dict(HTTPRequest._DEFAULTS) self.defaults = dict(HTTPRequest._DEFAULTS)
if defaults is not None: if defaults is not None:
self.defaults.update(defaults) self.defaults.update(defaults)
@ -235,7 +251,7 @@ class AsyncHTTPClient(Configurable):
# where normal dicts get converted to HTTPHeaders objects. # where normal dicts get converted to HTTPHeaders objects.
request.headers = httputil.HTTPHeaders(request.headers) request.headers = httputil.HTTPHeaders(request.headers)
request = _RequestProxy(request, self.defaults) request = _RequestProxy(request, self.defaults)
future = TracebackFuture() future = Future()
if callback is not None: if callback is not None:
callback = stack_context.wrap(callback) callback = stack_context.wrap(callback)
@ -256,7 +272,7 @@ class AsyncHTTPClient(Configurable):
if raise_error and response.error: if raise_error and response.error:
future.set_exception(response.error) future.set_exception(response.error)
else: else:
future.set_result(response) future_set_result_unless_cancelled(future, response)
self.fetch_impl(request, handle_response) self.fetch_impl(request, handle_response)
return future return future
@ -318,8 +334,8 @@ class HTTPRequest(object):
ssl_options=None): ssl_options=None):
r"""All parameters except ``url`` are optional. r"""All parameters except ``url`` are optional.
:arg string url: URL to fetch :arg str url: URL to fetch
:arg string method: HTTP method, e.g. "GET" or "POST" :arg str method: HTTP method, e.g. "GET" or "POST"
:arg headers: Additional HTTP headers to pass on the request :arg headers: Additional HTTP headers to pass on the request
:type headers: `~tornado.httputil.HTTPHeaders` or `dict` :type headers: `~tornado.httputil.HTTPHeaders` or `dict`
:arg body: HTTP request body as a string (byte or unicode; if unicode :arg body: HTTP request body as a string (byte or unicode; if unicode
@ -335,9 +351,9 @@ class HTTPRequest(object):
to pass a ``Content-Length`` in the headers as otherwise chunked to pass a ``Content-Length`` in the headers as otherwise chunked
encoding will be used, and many servers do not support chunked encoding will be used, and many servers do not support chunked
encoding on requests. New in Tornado 4.0 encoding on requests. New in Tornado 4.0
:arg string auth_username: Username for HTTP authentication :arg str auth_username: Username for HTTP authentication
:arg string auth_password: Password for HTTP authentication :arg str auth_password: Password for HTTP authentication
:arg string auth_mode: Authentication mode; default is "basic". :arg str auth_mode: Authentication mode; default is "basic".
Allowed values are implementation-defined; ``curl_httpclient`` Allowed values are implementation-defined; ``curl_httpclient``
supports "basic" and "digest"; ``simple_httpclient`` only supports supports "basic" and "digest"; ``simple_httpclient`` only supports
"basic" "basic"
@ -350,19 +366,19 @@ class HTTPRequest(object):
:arg bool follow_redirects: Should redirects be followed automatically :arg bool follow_redirects: Should redirects be followed automatically
or return the 3xx response? Default True. or return the 3xx response? Default True.
:arg int max_redirects: Limit for ``follow_redirects``, default 5. :arg int max_redirects: Limit for ``follow_redirects``, default 5.
:arg string user_agent: String to send as ``User-Agent`` header :arg str user_agent: String to send as ``User-Agent`` header
:arg bool decompress_response: Request a compressed response from :arg bool decompress_response: Request a compressed response from
the server and decompress it after downloading. Default is True. the server and decompress it after downloading. Default is True.
New in Tornado 4.0. New in Tornado 4.0.
:arg bool use_gzip: Deprecated alias for ``decompress_response`` :arg bool use_gzip: Deprecated alias for ``decompress_response``
since Tornado 4.0. since Tornado 4.0.
:arg string network_interface: Network interface to use for request. :arg str network_interface: Network interface to use for request.
``curl_httpclient`` only; see note below. ``curl_httpclient`` only; see note below.
:arg callable streaming_callback: If set, ``streaming_callback`` will :arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
be run with each chunk of data as it is received, and be run with each chunk of data as it is received, and
``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
the final response. the final response.
:arg callable header_callback: If set, ``header_callback`` will :arg collections.abc.Callable header_callback: If set, ``header_callback`` will
be run with each header line as it is received (including the be run with each header line as it is received (including the
first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
containing only ``\r\n``. All lines include the trailing newline containing only ``\r\n``. All lines include the trailing newline
@ -370,28 +386,28 @@ class HTTPRequest(object):
response. This is most useful in conjunction with response. This is most useful in conjunction with
``streaming_callback``, because it's the only way to get access to ``streaming_callback``, because it's the only way to get access to
header data while the request is in progress. header data while the request is in progress.
:arg callable prepare_curl_callback: If set, will be called with :arg collections.abc.Callable prepare_curl_callback: If set, will be called with
a ``pycurl.Curl`` object to allow the application to make additional a ``pycurl.Curl`` object to allow the application to make additional
``setopt`` calls. ``setopt`` calls.
:arg string proxy_host: HTTP proxy hostname. To use proxies, :arg str proxy_host: HTTP proxy hostname. To use proxies,
``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``, ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
``proxy_pass`` and ``proxy_auth_mode`` are optional. Proxies are ``proxy_pass`` and ``proxy_auth_mode`` are optional. Proxies are
currently only supported with ``curl_httpclient``. currently only supported with ``curl_httpclient``.
:arg int proxy_port: HTTP proxy port :arg int proxy_port: HTTP proxy port
:arg string proxy_username: HTTP proxy username :arg str proxy_username: HTTP proxy username
:arg string proxy_password: HTTP proxy password :arg str proxy_password: HTTP proxy password
:arg string proxy_auth_mode: HTTP proxy Authentication mode; :arg str proxy_auth_mode: HTTP proxy Authentication mode;
default is "basic". supports "basic" and "digest" default is "basic". supports "basic" and "digest"
:arg bool allow_nonstandard_methods: Allow unknown values for ``method`` :arg bool allow_nonstandard_methods: Allow unknown values for ``method``
argument? Default is False. argument? Default is False.
:arg bool validate_cert: For HTTPS requests, validate the server's :arg bool validate_cert: For HTTPS requests, validate the server's
certificate? Default is True. certificate? Default is True.
:arg string ca_certs: filename of CA certificates in PEM format, :arg str ca_certs: filename of CA certificates in PEM format,
or None to use defaults. See note below when used with or None to use defaults. See note below when used with
``curl_httpclient``. ``curl_httpclient``.
:arg string client_key: Filename for client SSL key, if any. See :arg str client_key: Filename for client SSL key, if any. See
note below when used with ``curl_httpclient``. note below when used with ``curl_httpclient``.
:arg string client_cert: Filename for client SSL certificate, if any. :arg str client_cert: Filename for client SSL certificate, if any.
See note below when used with ``curl_httpclient``. See note below when used with ``curl_httpclient``.
:arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
``simple_httpclient`` (unsupported by ``curl_httpclient``). ``simple_httpclient`` (unsupported by ``curl_httpclient``).
@ -654,6 +670,8 @@ def main():
define("print_body", type=bool, default=True) define("print_body", type=bool, default=True)
define("follow_redirects", type=bool, default=True) define("follow_redirects", type=bool, default=True)
define("validate_cert", type=bool, default=True) define("validate_cert", type=bool, default=True)
define("proxy_host", type=str)
define("proxy_port", type=int)
args = parse_command_line() args = parse_command_line()
client = HTTPClient() client = HTTPClient()
for arg in args: for arg in args:
@ -661,6 +679,8 @@ def main():
response = client.fetch(arg, response = client.fetch(arg,
follow_redirects=options.follow_redirects, follow_redirects=options.follow_redirects,
validate_cert=options.validate_cert, validate_cert=options.validate_cert,
proxy_host=options.proxy_host,
proxy_port=options.proxy_port,
) )
except HTTPError as e: except HTTPError as e:
if e.response is not None: if e.response is not None:

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -77,7 +76,7 @@ class HTTPServer(TCPServer, Configurable,
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
os.path.join(data_dir, "mydomain.key")) os.path.join(data_dir, "mydomain.key"))
HTTPServer(applicaton, ssl_options=ssl_ctx) HTTPServer(application, ssl_options=ssl_ctx)
`HTTPServer` initialization follows one of three patterns (the `HTTPServer` initialization follows one of three patterns (the
initialization methods are defined on `tornado.tcpserver.TCPServer`): initialization methods are defined on `tornado.tcpserver.TCPServer`):
@ -134,6 +133,9 @@ class HTTPServer(TCPServer, Configurable,
.. versionchanged:: 4.5 .. versionchanged:: 4.5
Added the ``trusted_downstream`` argument. Added the ``trusted_downstream`` argument.
.. versionchanged:: 5.0
The ``io_loop`` argument has been removed.
""" """
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
# Ignore args to __init__; real initialization belongs in # Ignore args to __init__; real initialization belongs in
@ -143,7 +145,7 @@ class HTTPServer(TCPServer, Configurable,
# completely) # completely)
pass pass
def initialize(self, request_callback, no_keep_alive=False, io_loop=None, def initialize(self, request_callback, no_keep_alive=False,
xheaders=False, ssl_options=None, protocol=None, xheaders=False, ssl_options=None, protocol=None,
decompress_request=False, decompress_request=False,
chunk_size=None, max_header_size=None, chunk_size=None, max_header_size=None,
@ -151,7 +153,6 @@ class HTTPServer(TCPServer, Configurable,
max_body_size=None, max_buffer_size=None, max_body_size=None, max_buffer_size=None,
trusted_downstream=None): trusted_downstream=None):
self.request_callback = request_callback self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders self.xheaders = xheaders
self.protocol = protocol self.protocol = protocol
self.conn_params = HTTP1ConnectionParameters( self.conn_params = HTTP1ConnectionParameters(
@ -162,7 +163,7 @@ class HTTPServer(TCPServer, Configurable,
max_body_size=max_body_size, max_body_size=max_body_size,
body_timeout=body_timeout, body_timeout=body_timeout,
no_keep_alive=no_keep_alive) no_keep_alive=no_keep_alive)
TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options, TCPServer.__init__(self, ssl_options=ssl_options,
max_buffer_size=max_buffer_size, max_buffer_size=max_buffer_size,
read_chunk_size=chunk_size) read_chunk_size=chunk_size)
self._connections = set() self._connections = set()
@ -285,6 +286,10 @@ class _HTTPRequestContext(object):
proto_header = headers.get( proto_header = headers.get(
"X-Scheme", headers.get("X-Forwarded-Proto", "X-Scheme", headers.get("X-Forwarded-Proto",
self.protocol)) self.protocol))
if proto_header:
# use only the last proto entry if there is more than one
# TODO: support trusting mutiple layers of proxied protocol
proto_header = proto_header.split(',')[-1].strip()
if proto_header in ("http", "https"): if proto_header in ("http", "https"):
self.protocol = proto_header self.protocol = proto_header
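The net effect: when several proxies append to ``X-Forwarded-Proto``, only the value added by the closest proxy is honored. For example, given the header value ``http,https``:

    # X-Forwarded-Proto: http,https
    proto = "http,https".split(',')[-1].strip()  # -> "https"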

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -61,7 +60,7 @@ except ImportError:
SSLError = _SSLError # type: ignore SSLError = _SSLError # type: ignore
try: try:
import typing import typing # noqa: F401
except ImportError: except ImportError:
pass pass
@ -184,11 +183,16 @@ class HTTPHeaders(collections.MutableMapping):
""" """
if line[0].isspace(): if line[0].isspace():
# continuation of a multi-line header # continuation of a multi-line header
if self._last_key is None:
raise HTTPInputError("first header line cannot start with whitespace")
new_part = ' ' + line.lstrip() new_part = ' ' + line.lstrip()
self._as_list[self._last_key][-1] += new_part self._as_list[self._last_key][-1] += new_part
self._dict[self._last_key] += new_part self._dict[self._last_key] += new_part
else: else:
name, value = line.split(":", 1) try:
name, value = line.split(":", 1)
except ValueError:
raise HTTPInputError("no colon in header line")
self.add(name, value.strip()) self.add(name, value.strip())
@classmethod @classmethod
@ -198,6 +202,12 @@ class HTTPHeaders(collections.MutableMapping):
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
>>> sorted(h.items()) >>> sorted(h.items())
[('Content-Length', '42'), ('Content-Type', 'text/html')] [('Content-Length', '42'), ('Content-Type', 'text/html')]
.. versionchanged:: 5.1
Raises `HTTPInputError` on malformed headers instead of a
mix of `KeyError` and `ValueError`.
""" """
h = cls() h = cls()
for line in _CRLF_RE.split(headers): for line in _CRLF_RE.split(headers):
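With these changes callers can catch one exception type for any malformed header block; a sketch:

    from tornado import httputil

    for bad in ("no-colon-here\r\n",           # missing ':' separator
                " leading-continuation\r\n"):  # continuation with no prior header
        try:
            httputil.HTTPHeaders.parse(bad)
        except httputil.HTTPInputError:
            pass  # previously these surfaced as ValueError / KeyError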
@ -467,8 +477,7 @@ class HTTPServerRequest(object):
def __repr__(self): def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % ( return "%s(%s)" % (self.__class__.__name__, args)
self.__class__.__name__, args, dict(self.headers))
class HTTPInputError(Exception): class HTTPInputError(Exception):
@ -829,6 +838,8 @@ def parse_request_start_line(line):
try: try:
method, path, version = line.split(" ") method, path, version = line.split(" ")
except ValueError: except ValueError:
# https://tools.ietf.org/html/rfc7230#section-3.1.1
# invalid request-line SHOULD respond with a 400 (Bad Request)
raise HTTPInputError("Malformed HTTP request line") raise HTTPInputError("Malformed HTTP request line")
if not re.match(r"^HTTP/1\.[0-9]$", version): if not re.match(r"^HTTP/1\.[0-9]$", version):
raise HTTPInputError( raise HTTPInputError(
@ -940,6 +951,16 @@ def split_host_and_port(netloc):
return (host, port) return (host, port)
def qs_to_qsl(qs):
"""Generator converting a result of ``parse_qs`` back to name-value pairs.
.. versionadded:: 5.0
"""
for k, vs in qs.items():
for v in vs:
yield (k, v)
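A quick illustration of `qs_to_qsl` round-tripping `parse_qs` output (Python 3 import shown; on Python 2 `parse_qs` lives in `urlparse`). Pair order follows dict iteration order, so treat it as unordered on older Pythons:

    from urllib.parse import parse_qs
    from tornado.httputil import qs_to_qsl

    qs = parse_qs("a=1&a=2&b=3")    # {'a': ['1', '2'], 'b': ['3']}
    pairs = list(qs_to_qsl(qs))     # [('a', '1'), ('a', '2'), ('b', '3')]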
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") _OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].") _QuotePatt = re.compile(r"[\\].")
_nulljoin = ''.join _nulljoin = ''.join

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -16,14 +15,19 @@
"""An I/O event loop for non-blocking sockets. """An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop.
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may Typical applications will use a single `IOLoop` object, accessed via
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest` `IOLoop.current` class method. The `IOLoop.start` method (or
case. equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.
In addition to I/O events, the `IOLoop` can also schedule time-based
events. `IOLoop.add_timeout` is a non-blocking alternative to
`time.sleep`.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
""" """
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
@ -44,39 +48,48 @@ import time
import traceback import traceback
import math import math
from tornado.concurrent import TracebackFuture, is_future from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501
from tornado.log import app_log, gen_log from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context from tornado import stack_context
from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds from tornado.util import (
PY3, Configurable, errno_from_exception, timedelta_to_seconds,
TimeoutError, unicode_type, import_object,
)
try: try:
import signal import signal
except ImportError: except ImportError:
signal = None signal = None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
if PY3: if PY3:
import _thread as thread import _thread as thread
else: else:
import thread import thread
try:
import asyncio
except ImportError:
asyncio = None
_POLL_TIMEOUT = 3600.0 _POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable): class IOLoop(Configurable):
"""A level-triggered I/O loop. """A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they On Python 3, `IOLoop` is a wrapper around the `asyncio` event
are available, or else we fall back on select(). If you are loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD
implementing a system that needs to handle thousands of and Mac OS X) if they are available, or else we fall back on
simultaneous connections, you should use a system that supports select(). If you are implementing a system that needs to handle
either ``epoll`` or ``kqueue``. thousands of simultaneous connections, you should use a system
that supports either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server: Example usage for a simple TCP server:
@ -84,9 +97,18 @@ class IOLoop(Configurable):
import errno import errno
import functools import functools
import tornado.ioloop
import socket import socket
import tornado.ioloop
from tornado import gen
from tornado.iostream import IOStream
@gen.coroutine
def handle_connection(connection, address):
stream = IOStream(connection)
message = yield stream.read_until_close()
print("message from client:", message.decode().strip())
def connection_ready(sock, fd, events): def connection_ready(sock, fd, events):
while True: while True:
try: try:
@ -102,7 +124,7 @@ class IOLoop(Configurable):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0) sock.setblocking(0)
sock.bind(("", port)) sock.bind(("", 8888))
sock.listen(128) sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current() io_loop = tornado.ioloop.IOLoop.current()
@ -121,9 +143,26 @@ class IOLoop(Configurable):
current instance. If ``make_current=False``, the new `IOLoop` will current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current. not try to become current.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2 .. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop` Added the ``make_current`` keyword argument to the `IOLoop`
constructor. constructor.
.. versionchanged:: 5.0
Uses the `asyncio` event loop by default. The
``IOLoop.configure`` method cannot be used on Python 3 except
to redundantly specify the `asyncio` event loop.
""" """
# Constants from the epoll module # Constants from the epoll module
_EPOLLIN = 0x001 _EPOLLIN = 0x001
@ -141,54 +180,75 @@ class IOLoop(Configurable):
WRITE = _EPOLLOUT WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance # In Python 2, _current.instance points to the current IOLoop.
_instance_lock = threading.Lock()
_current = threading.local() _current = threading.local()
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
_ioloop_for_asyncio = dict()
@classmethod
def configure(cls, impl, **kwargs):
if asyncio is not None:
from tornado.platform.asyncio import BaseAsyncIOLoop
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if not issubclass(impl, BaseAsyncIOLoop):
raise RuntimeError(
"only AsyncIOLoop is allowed when asyncio is available")
super(IOLoop, cls).configure(impl, **kwargs)
@staticmethod @staticmethod
def instance(): def instance():
"""Returns a global `IOLoop` instance. """Deprecated alias for `IOLoop.current()`.
Most applications have a single, global `IOLoop` running on the .. versionchanged:: 5.0
main thread. Use this method to get this instance from
another thread. In most other cases, it is better to use `current()` Previously, this method returned a global singleton
to get the current thread's `IOLoop`. `IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
""" """
if not hasattr(IOLoop, "_instance"): return IOLoop.current()
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
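# A hedged sketch of the replacement pattern described above: rather than
# relying on instance() from another thread, keep an explicit module-level
# reference to the main loop. The names main_io_loop, worker and
# notify_main are illustrative assumptions.
import threading
import tornado.ioloop

main_io_loop = None

def notify_main():
    print("hello from the worker thread")

def worker():
    # add_callback is the one IOLoop method that is safe to call
    # from other threads.
    main_io_loop.add_callback(notify_main)

def main():
    global main_io_loop
    main_io_loop = tornado.ioloop.IOLoop.current()
    threading.Thread(target=worker).start()
    main_io_loop.start()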
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self): def install(self):
"""Installs this `IOLoop` object as the singleton instance. """Deprecated alias for `make_current()`.
This is normally not necessary as `instance()` will create .. versionchanged:: 5.0
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
When using an `IOLoop` subclass, `install` must be called prior Previously, this method would set this `IOLoop` as the
to creating any objects that implicitly create their own global singleton used by `IOLoop.instance()`. Now that
`IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`). `instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
""" """
assert not IOLoop.initialized() self.make_current()
IOLoop._instance = self
@staticmethod @staticmethod
def clear_instance(): def clear_instance():
"""Clear the global `IOLoop` instance. """Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
`clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
.. versionadded:: 4.0
""" """
if hasattr(IOLoop, "_instance"): IOLoop.clear_current()
del IOLoop._instance
@staticmethod @staticmethod
def current(instance=True): def current(instance=True):
@ -196,22 +256,42 @@ class IOLoop(Configurable):
If an `IOLoop` is currently running or has been marked as If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the no current `IOLoop` and ``instance`` is true, creates one.
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1 .. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to Added ``instance`` argument to control the fallback to
`IOLoop.instance()`. `IOLoop.instance()`.
.. versionchanged:: 5.0
On Python 3, control of the current `IOLoop` is delegated
to `asyncio`, with this and other methods as pass-through accessors.
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method). ``instance=False`` is deprecated,
since even if we do not create an `IOLoop`, this method
may initialize the asyncio loop.
""" """
current = getattr(IOLoop._current, "instance", None) if asyncio is None:
if current is None and instance: current = getattr(IOLoop._current, "instance", None)
return IOLoop.instance() if current is None and instance:
current = IOLoop()
if IOLoop._current.instance is not current:
raise RuntimeError("new IOLoop did not become current")
else:
try:
loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
if not instance:
return None
raise
try:
return IOLoop._ioloop_for_asyncio[loop]
except KeyError:
if instance:
from tornado.platform.asyncio import AsyncIOMainLoop
current = AsyncIOMainLoop(make_current=True)
else:
current = None
return current return current
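# A small sketch of the delegation described above, assuming Python 3
# with the asyncio-backed loop: IOLoop.current() wraps the thread's
# asyncio event loop, so tornado and asyncio code drive the same loop.
import asyncio
import tornado.ioloop

loop = tornado.ioloop.IOLoop.current()
# AsyncIOMainLoop exposes the wrapped loop as .asyncio_loop.
assert loop.asyncio_loop is asyncio.get_event_loop()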
def make_current(self): def make_current(self):
@ -226,12 +306,38 @@ class IOLoop(Configurable):
.. versionchanged:: 4.1 .. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop` An `IOLoop` created while there is no current `IOLoop`
will automatically become current. will automatically become current.
.. versionchanged:: 5.0
This method also sets the current `asyncio` event loop.
""" """
# The asyncio event loops override this method.
assert asyncio is None
old = getattr(IOLoop._current, "instance", None)
if old is not None:
old.clear_current()
IOLoop._current.instance = self IOLoop._current.instance = self
@staticmethod @staticmethod
def clear_current(): def clear_current():
IOLoop._current.instance = None """Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
.. versionchanged:: 5.0
This method also clears the current `asyncio` event loop.
"""
old = IOLoop.current(instance=False)
if old is not None:
old._clear_current_hook()
if asyncio is None:
IOLoop._current.instance = None
def _clear_current_hook(self):
"""Instance method called when an IOLoop ceases to be current.
May be overridden by subclasses as a counterpart to make_current.
"""
pass
@classmethod @classmethod
def configurable_base(cls): def configurable_base(cls):
@ -239,22 +345,19 @@ class IOLoop(Configurable):
@classmethod @classmethod
def configurable_default(cls): def configurable_default(cls):
if hasattr(select, "epoll"): if asyncio is not None:
from tornado.platform.epoll import EPollIOLoop from tornado.platform.asyncio import AsyncIOLoop
return EPollIOLoop return AsyncIOLoop
if hasattr(select, "kqueue"): return PollIOLoop
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self, make_current=None): def initialize(self, make_current=None):
if make_current is None: if make_current is None:
if IOLoop.current(instance=False) is None: if IOLoop.current(instance=False) is None:
self.make_current() self.make_current()
elif make_current: elif make_current:
if IOLoop.current(instance=False) is not None: current = IOLoop.current(instance=False)
# AsyncIO loops can already be current by this point.
if current is not None and current is not self:
raise RuntimeError("current IOLoop already exists") raise RuntimeError("current IOLoop already exists")
self.make_current() self.make_current()
@ -333,6 +436,11 @@ class IOLoop(Configurable):
documentation for the `signal` module for more information. documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is If ``action`` is None, the process will be killed if it is
blocked for too long. blocked for too long.
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
""" """
raise NotImplementedError() raise NotImplementedError()
@ -342,6 +450,11 @@ class IOLoop(Configurable):
Equivalent to ``set_blocking_signal_threshold(seconds, Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)`` self.log_stack)``
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
""" """
self.set_blocking_signal_threshold(seconds, self.log_stack) self.set_blocking_signal_threshold(seconds, self.log_stack)
@ -414,7 +527,7 @@ class IOLoop(Configurable):
The keyword-only argument ``timeout`` may be used to set The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires, a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised. a `tornado.util.TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine` This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function:: to allow asynchronous calls in a ``main()`` function::
@ -428,6 +541,9 @@ class IOLoop(Configurable):
.. versionchanged:: 4.3 .. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error. Returning a non-``None``, non-yieldable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
""" """
future_cell = [None] future_cell = [None]
@ -438,22 +554,29 @@ class IOLoop(Configurable):
from tornado.gen import convert_yielded from tornado.gen import convert_yielded
result = convert_yielded(result) result = convert_yielded(result)
except Exception: except Exception:
future_cell[0] = TracebackFuture() future_cell[0] = Future()
future_cell[0].set_exc_info(sys.exc_info()) future_set_exc_info(future_cell[0], sys.exc_info())
else: else:
if is_future(result): if is_future(result):
future_cell[0] = result future_cell[0] = result
else: else:
future_cell[0] = TracebackFuture() future_cell[0] = Future()
future_cell[0].set_result(result) future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop()) self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run) self.add_callback(run)
if timeout is not None: if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop) def timeout_callback():
# If we can cancel the future, do so and wait on it. If not,
# just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start() self.start()
if timeout is not None: if timeout is not None:
self.remove_timeout(timeout_handle) self.remove_timeout(timeout_handle)
if not future_cell[0].done(): if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout) raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result() return future_cell[0].result()
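# Usage sketch for run_sync with the new timeout behavior: on timeout the
# coroutine is cancelled and tornado.util.TimeoutError is raised. The
# fetch_url coroutine and URL are illustrative assumptions.
from tornado import gen, httpclient, ioloop

@gen.coroutine
def fetch_url(url):
    response = yield httpclient.AsyncHTTPClient().fetch(url)
    raise gen.Return(response.body)

body = ioloop.IOLoop.current().run_sync(
    lambda: fetch_url("http://example.com/"), timeout=5)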
@ -593,8 +716,39 @@ class IOLoop(Configurable):
""" """
assert is_future(future) assert is_future(future)
callback = stack_context.wrap(callback) callback = stack_context.wrap(callback)
future.add_done_callback( future_add_done_callback(
lambda future: self.add_callback(callback, future)) future, lambda future: self.add_callback(callback, future))
def run_in_executor(self, executor, func, *args):
"""Runs a function in a ``concurrent.futures.Executor``. If
``executor`` is ``None``, the IO loop's default executor will be used.
Use `functools.partial` to pass keyword arguments to ``func``.
.. versionadded:: 5.0
"""
if ThreadPoolExecutor is None:
raise RuntimeError(
"concurrent.futures is required to use IOLoop.run_in_executor")
if executor is None:
if not hasattr(self, '_executor'):
from tornado.process import cpu_count
self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
executor = self._executor
c_future = executor.submit(func, *args)
# Concurrent Futures are not usable with await. Wrap this in a
# Tornado Future instead, using self.add_future for thread-safety.
t_future = Future()
self.add_future(c_future, lambda f: chain_future(f, t_future))
return t_future
def set_default_executor(self, executor):
"""Sets the default executor to use with :meth:`run_in_executor`.
.. versionadded:: 5.0
"""
self._executor = executor
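# A short sketch of run_in_executor/set_default_executor; blocking_work
# is an assumption standing in for any blocking call that should not run
# on the IOLoop thread.
import time
from concurrent.futures import ThreadPoolExecutor
from tornado import gen, ioloop

def blocking_work(n):
    time.sleep(n)
    return n * 2

@gen.coroutine
def main():
    loop = ioloop.IOLoop.current()
    loop.set_default_executor(ThreadPoolExecutor(max_workers=2))
    result = yield loop.run_in_executor(None, blocking_work, 1)
    raise gen.Return(result)

print(ioloop.IOLoop.current().run_sync(main))  # prints 2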
def _run_callback(self, callback): def _run_callback(self, callback):
"""Runs a callback with error handling. """Runs a callback with error handling.
@ -701,6 +855,7 @@ class PollIOLoop(IOLoop):
self._stopped = False self._stopped = False
self._closing = False self._closing = False
self._thread_ident = None self._thread_ident = None
self._pid = os.getpid()
self._blocking_signal_threshold = None self._blocking_signal_threshold = None
self._timeout_counter = itertools.count() self._timeout_counter = itertools.count()
@ -711,6 +866,22 @@ class PollIOLoop(IOLoop):
lambda fd, events: self._waker.consume(), lambda fd, events: self._waker.consume(),
self.READ) self.READ)
@classmethod
def configurable_base(cls):
return PollIOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def close(self, all_fds=False): def close(self, all_fds=False):
self._closing = True self._closing = True
self.remove_handler(self._waker.fileno()) self.remove_handler(self._waker.fileno())
@ -721,6 +892,8 @@ class PollIOLoop(IOLoop):
self._impl.close() self._impl.close()
self._callbacks = None self._callbacks = None
self._timeouts = None self._timeouts = None
if hasattr(self, '_executor'):
self._executor.shutdown()
def add_handler(self, fd, handler, events): def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd) fd, obj = self.split_fd(fd)
@ -753,12 +926,15 @@ class PollIOLoop(IOLoop):
def start(self): def start(self):
if self._running: if self._running:
raise RuntimeError("IOLoop is already running") raise RuntimeError("IOLoop is already running")
if os.getpid() != self._pid:
raise RuntimeError("Cannot share PollIOLoops across processes")
self._setup_logging() self._setup_logging()
if self._stopped: if self._stopped:
self._stopped = False self._stopped = False
return return
old_current = getattr(IOLoop._current, "instance", None) old_current = IOLoop.current(instance=False)
IOLoop._current.instance = self if old_current is not self:
self.make_current()
self._thread_ident = thread.get_ident() self._thread_ident = thread.get_ident()
self._running = True self._running = True
@ -901,7 +1077,10 @@ class PollIOLoop(IOLoop):
self._stopped = False self._stopped = False
if self._blocking_signal_threshold is not None: if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0) signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current if old_current is None:
IOLoop.clear_current()
elif old_current is not self:
old_current.make_current()
if old_wakeup_fd is not None: if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd) signal.set_wakeup_fd(old_wakeup_fd)
@ -987,20 +1166,23 @@ class PeriodicCallback(object):
`start` must be called after the `PeriodicCallback` is created. `start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
def __init__(self, callback, callback_time, io_loop=None): def __init__(self, callback, callback_time):
self.callback = callback self.callback = callback
if callback_time <= 0: if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time") raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False self._running = False
self._timeout = None self._timeout = None
def start(self): def start(self):
"""Starts the timer.""" """Starts the timer."""
# Looking up the IOLoop here makes it possible to first instantiate
# the PeriodicCallback in another thread, then start it using
# IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True self._running = True
self._next_timeout = self.io_loop.time() self._next_timeout = self.io_loop.time()
self._schedule_next() self._schedule_next()
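# Usage sketch reflecting the removed io_loop argument; the 500ms interval
# and heartbeat function are illustrative. start() picks up
# IOLoop.current(), so a PeriodicCallback built on another thread can be
# started via add_callback.
from tornado import ioloop

def heartbeat():
    print("tick")

pc = ioloop.PeriodicCallback(heartbeat, 500)  # callback_time is in ms
loop = ioloop.IOLoop.current()
loop.add_callback(pc.start)
loop.start()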


@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -28,16 +27,17 @@ from __future__ import absolute_import, division, print_function
import collections import collections
import errno import errno
import io
import numbers import numbers
import os import os
import socket import socket
import sys import sys
import re import re
from tornado.concurrent import TracebackFuture from tornado.concurrent import Future
from tornado import ioloop from tornado import ioloop
from tornado.log import gen_log, app_log from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError, _client_ssl_defaults, _server_ssl_defaults from tornado.netutil import ssl_wrap_socket, _client_ssl_defaults, _server_ssl_defaults
from tornado import stack_context from tornado import stack_context
from tornado.util import errno_from_exception from tornado.util import errno_from_exception
@ -66,7 +66,7 @@ _ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT) errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"): if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore # noqa: E501
if sys.platform == 'darwin': if sys.platform == 'darwin':
# OSX appears to have a race condition that causes send(2) to return # OSX appears to have a race condition that causes send(2) to return
@ -117,6 +117,96 @@ class StreamBufferFullError(Exception):
""" """
class _StreamBuffer(object):
"""
A specialized buffer that tries to avoid copies when large pieces
of data are encountered.
"""
def __init__(self):
# A sequence of (False, bytearray) and (True, memoryview) objects
self._buffers = collections.deque()
# Position in the first buffer
self._first_pos = 0
self._size = 0
def __len__(self):
return self._size
# Data above this size will be appended separately instead
# of extending an existing bytearray
_large_buf_threshold = 2048
def append(self, data):
"""
Append the given piece of data (should be a buffer-compatible object).
"""
size = len(data)
if size > self._large_buf_threshold:
if not isinstance(data, memoryview):
data = memoryview(data)
self._buffers.append((True, data))
elif size > 0:
if self._buffers:
is_memview, b = self._buffers[-1]
new_buf = is_memview or len(b) >= self._large_buf_threshold
else:
new_buf = True
if new_buf:
self._buffers.append((False, bytearray(data)))
else:
b += data
self._size += size
def peek(self, size):
"""
Get a view over at most ``size`` bytes (possibly fewer) at the
current buffer position.
"""
assert size > 0
try:
is_memview, b = self._buffers[0]
except IndexError:
return memoryview(b'')
pos = self._first_pos
if is_memview:
return b[pos:pos + size]
else:
return memoryview(b)[pos:pos + size]
def advance(self, size):
"""
Advance the current buffer position by ``size`` bytes.
"""
assert 0 < size <= self._size
self._size -= size
pos = self._first_pos
buffers = self._buffers
while buffers and size > 0:
is_large, b = buffers[0]
b_remain = len(b) - size - pos
if b_remain <= 0:
buffers.popleft()
size -= len(b) - pos
pos = 0
elif is_large:
pos += size
size = 0
else:
# Amortized O(1) shrink for Python 2
pos += size
if len(b) <= 2 * pos:
del b[:pos]
pos = 0
size = 0
assert size == 0
self._first_pos = pos
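# A quick illustration of the _StreamBuffer contract used by
# _handle_write below; the byte strings are arbitrary examples.
buf = _StreamBuffer()
buf.append(b"hello ")
buf.append(b"x" * 4096)   # above _large_buf_threshold: kept as memoryview
view = buf.peek(6)        # view over at most 6 bytes, no copy
assert bytes(view) == b"hello "
buf.advance(6)            # consume the bytes actually written
assert len(buf) == 4096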
class BaseIOStream(object): class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket. """A utility class to write to and read from a non-blocking file or socket.
@ -135,12 +225,10 @@ class BaseIOStream(object):
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`, Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`. `read_from_fd`, and optionally `get_fd_error`.
""" """
def __init__(self, io_loop=None, max_buffer_size=None, def __init__(self, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None): read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor. """`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
Deprecated since Tornado 4.1.
:arg max_buffer_size: Maximum amount of incoming data to buffer; :arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB. defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the :arg read_chunk_size: Amount of data to read at one time from the
@ -151,8 +239,11 @@ class BaseIOStream(object):
.. versionchanged:: 4.0 .. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB. ``read_chunk_size`` to 64KB.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been
removed.
""" """
self.io_loop = io_loop or ioloop.IOLoop.current() self.io_loop = ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600 self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause # A chunk size that is too close to max_buffer_size can cause
# spurious failures. # spurious failures.
@ -163,13 +254,11 @@ class BaseIOStream(object):
self._read_buffer = bytearray() self._read_buffer = bytearray()
self._read_buffer_pos = 0 self._read_buffer_pos = 0
self._read_buffer_size = 0 self._read_buffer_size = 0
self._write_buffer = bytearray() self._user_read_buffer = False
self._write_buffer_pos = 0 self._after_user_read_buffer = None
self._write_buffer_size = 0 self._write_buffer = _StreamBuffer()
self._write_buffer_frozen = False
self._total_write_index = 0 self._total_write_index = 0
self._total_write_done_index = 0 self._total_write_done_index = 0
self._pending_writes_while_frozen = []
self._read_delimiter = None self._read_delimiter = None
self._read_regex = None self._read_regex = None
self._read_max_bytes = None self._read_max_bytes = None
@ -213,13 +302,18 @@ class BaseIOStream(object):
""" """
raise NotImplementedError() raise NotImplementedError()
def read_from_fd(self): def read_from_fd(self, buf):
"""Attempts to read from the underlying file. """Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket Reads up to ``len(buf)`` bytes, storing them in the buffer.
returned `~errno.EWOULDBLOCK` or equivalent), otherwise Returns the number of bytes read. Returns None if there was
returns the data. When possible, should return no more than nothing to read (the socket returned `~errno.EWOULDBLOCK` or
``self.read_chunk_size`` bytes at a time. equivalent), and zero on EOF.
.. versionchanged:: 5.0
Interface redesigned to take a buffer and return a number
of bytes instead of a freshly-allocated object.
""" """
raise NotImplementedError() raise NotImplementedError()
@ -257,7 +351,7 @@ class BaseIOStream(object):
except UnsatisfiableReadError as e: except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events. # Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e) gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True) self.close(exc_info=e)
return future return future
except: except:
if future is not None: if future is not None:
@ -290,7 +384,7 @@ class BaseIOStream(object):
except UnsatisfiableReadError as e: except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events. # Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e) gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True) self.close(exc_info=e)
return future return future
except: except:
if future is not None: if future is not None:
@ -328,6 +422,50 @@ class BaseIOStream(object):
raise raise
return future return future
def read_into(self, buf, callback=None, partial=False):
"""Asynchronously read a number of bytes.
``buf`` must be a writable buffer into which data will be read.
If a callback is given, it will be run with the number of read
bytes as an argument; if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as any bytes
have been read. Otherwise, it is run when the ``buf`` has been
entirely filled with read data.
.. versionadded:: 5.0
"""
future = self._set_read_callback(callback)
# First copy data already in read buffer
available_bytes = self._read_buffer_size
n = len(buf)
if available_bytes >= n:
end = self._read_buffer_pos + n
buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos:end]
del self._read_buffer[:end]
self._after_user_read_buffer = self._read_buffer
elif available_bytes > 0:
buf[:available_bytes] = memoryview(self._read_buffer)[self._read_buffer_pos:]
# Set up the supplied buffer as our temporary read buffer.
# The original (if it had any data remaining) has been
# saved for later.
self._user_read_buffer = True
self._read_buffer = buf
self._read_buffer_pos = 0
self._read_buffer_size = available_bytes
self._read_bytes = n
self._read_partial = partial
try:
self._try_inline_read()
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
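# Hedged usage sketch for read_into: fill a caller-supplied buffer instead
# of allocating a new bytes object per read. The 1024-byte frame size and
# the read_frame name are assumptions.
from tornado import gen

@gen.coroutine
def read_frame(stream):
    buf = bytearray(1024)                          # reusable buffer
    n = yield stream.read_into(buf, partial=True)  # bytes actually read
    raise gen.Return(bytes(buf[:n]))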
def read_until_close(self, callback=None, streaming_callback=None): def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed. """Asynchronously reads all data from the socket until it is closed.
@ -387,24 +525,20 @@ class BaseIOStream(object):
self._check_closed() self._check_closed()
if data: if data:
if (self.max_write_buffer_size is not None and if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size): len(self._write_buffer) + len(data) > self.max_write_buffer_size):
raise StreamBufferFullError("Reached maximum write buffer size") raise StreamBufferFullError("Reached maximum write buffer size")
if self._write_buffer_frozen: self._write_buffer.append(data)
self._pending_writes_while_frozen.append(data)
else:
self._write_buffer += data
self._write_buffer_size += len(data)
self._total_write_index += len(data) self._total_write_index += len(data)
if callback is not None: if callback is not None:
self._write_callback = stack_context.wrap(callback) self._write_callback = stack_context.wrap(callback)
future = None future = None
else: else:
future = TracebackFuture() future = Future()
future.add_done_callback(lambda f: f.exception()) future.add_done_callback(lambda f: f.exception())
self._write_futures.append((self._total_write_index, future)) self._write_futures.append((self._total_write_index, future))
if not self._connecting: if not self._connecting:
self._handle_write() self._handle_write()
if self._write_buffer_size: if self._write_buffer:
self._add_io_state(self.io_loop.WRITE) self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener() self._maybe_add_error_listener()
return future return future
@ -428,10 +562,14 @@ class BaseIOStream(object):
""" """
if not self.closed(): if not self.closed():
if exc_info: if exc_info:
if not isinstance(exc_info, tuple): if isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1] self.error = exc_info[1]
elif isinstance(exc_info, BaseException):
self.error = exc_info
else:
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close: if self._read_until_close:
if (self._streaming_callback is not None and if (self._streaming_callback is not None and
self._read_buffer_size): self._read_buffer_size):
@ -463,6 +601,7 @@ class BaseIOStream(object):
self._ssl_connect_future = None self._ssl_connect_future = None
for future in futures: for future in futures:
future.set_exception(StreamClosedError(real_error=self.error)) future.set_exception(StreamClosedError(real_error=self.error))
future.exception()
if self._close_callback is not None: if self._close_callback is not None:
cb = self._close_callback cb = self._close_callback
self._close_callback = None self._close_callback = None
@ -473,7 +612,6 @@ class BaseIOStream(object):
# if the IOStream object is kept alive by a reference cycle. # if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests. # TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None self._write_buffer = None
self._write_buffer_size = 0
def reading(self): def reading(self):
"""Returns true if we are currently reading from the stream.""" """Returns true if we are currently reading from the stream."""
@ -481,7 +619,7 @@ class BaseIOStream(object):
def writing(self): def writing(self):
"""Returns true if we are currently writing to the stream.""" """Returns true if we are currently writing to the stream."""
return self._write_buffer_size > 0 return bool(self._write_buffer)
def closed(self): def closed(self):
"""Returns true if the stream has been closed.""" """Returns true if the stream has been closed."""
@ -548,11 +686,11 @@ class BaseIOStream(object):
self.io_loop.update_handler(self.fileno(), self._state) self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e: except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e) gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True) self.close(exc_info=e)
except Exception: except Exception as e:
gen_log.error("Uncaught exception, closing connection.", gen_log.error("Uncaught exception, closing connection.",
exc_info=True) exc_info=True)
self.close(exc_info=True) self.close(exc_info=e)
raise raise
def _run_callback(self, callback, *args): def _run_callback(self, callback, *args):
@ -560,14 +698,14 @@ class BaseIOStream(object):
self._pending_callbacks -= 1 self._pending_callbacks -= 1
try: try:
return callback(*args) return callback(*args)
except Exception: except Exception as e:
app_log.error("Uncaught exception, closing connection.", app_log.error("Uncaught exception, closing connection.",
exc_info=True) exc_info=True)
# Close the socket on an uncaught exception from a user callback # Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is # (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we # gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors) # run out of file descriptors)
self.close(exc_info=True) self.close(exc_info=e)
# Re-raise the exception so that IOLoop.handle_callback_exception # Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error # can see it and log the error
raise raise
@ -672,10 +810,19 @@ class BaseIOStream(object):
if callback is not None: if callback is not None:
self._read_callback = stack_context.wrap(callback) self._read_callback = stack_context.wrap(callback)
else: else:
self._read_future = TracebackFuture() self._read_future = Future()
return self._read_future return self._read_future
def _run_read_callback(self, size, streaming): def _run_read_callback(self, size, streaming):
if self._user_read_buffer:
self._read_buffer = self._after_user_read_buffer or bytearray()
self._after_user_read_buffer = None
self._read_buffer_pos = 0
self._read_buffer_size = len(self._read_buffer)
self._user_read_buffer = False
result = size
else:
result = self._consume(size)
if streaming: if streaming:
callback = self._streaming_callback callback = self._streaming_callback
else: else:
@ -685,10 +832,11 @@ class BaseIOStream(object):
assert callback is None assert callback is None
future = self._read_future future = self._read_future
self._read_future = None self._read_future = None
future.set_result(self._consume(size))
future.set_result(result)
if callback is not None: if callback is not None:
assert (self._read_future is None) or streaming assert (self._read_future is None) or streaming
self._run_callback(callback, self._consume(size)) self._run_callback(callback, result)
else: else:
# If we scheduled a callback, we will add the error listener # If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now. # afterwards. If we didn't, we have to do it now.
@ -734,31 +882,44 @@ class BaseIOStream(object):
to read (i.e. the read returns EWOULDBLOCK or equivalent). On to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception. error closes the socket and raises an exception.
""" """
while True: try:
try: while True:
chunk = self.read_from_fd() try:
except (socket.error, IOError, OSError) as e: if self._user_read_buffer:
if errno_from_exception(e) == errno.EINTR: buf = memoryview(self._read_buffer)[self._read_buffer_size:]
continue else:
# ssl.SSLError is a subclass of socket.error buf = bytearray(self.read_chunk_size)
if self._is_connreset(e): bytes_read = self.read_from_fd(buf)
# Treat ECONNRESET as a connection close rather than except (socket.error, IOError, OSError) as e:
# an error to minimize log spam (the exception will if errno_from_exception(e) == errno.EINTR:
# be available on self.error for apps that care). continue
self.close(exc_info=True) # ssl.SSLError is a subclass of socket.error
return if self._is_connreset(e):
self.close(exc_info=True) # Treat ECONNRESET as a connection close rather than
raise # an error to minimize log spam (the exception will
break # be available on self.error for apps that care).
if chunk is None: self.close(exc_info=e)
return 0 return
self._read_buffer += chunk self.close(exc_info=e)
self._read_buffer_size += len(chunk) raise
break
if bytes_read is None:
return 0
elif bytes_read == 0:
self.close()
return 0
if not self._user_read_buffer:
self._read_buffer += memoryview(buf)[:bytes_read]
self._read_buffer_size += bytes_read
finally:
# Break the reference to buf so we don't waste a chunk's worth of
# memory in case an exception hangs on to our stack frame.
buf = None
if self._read_buffer_size > self.max_buffer_size: if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size") gen_log.error("Reached maximum read buffer size")
self.close() self.close()
raise StreamBufferFullError("Reached maximum read buffer size") raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk) return bytes_read
def _run_streaming_callback(self): def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size: if self._streaming_callback is not None and self._read_buffer_size:
@ -828,56 +989,28 @@ class BaseIOStream(object):
"delimiter %r not found within %d bytes" % ( "delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes)) delimiter, self._read_max_bytes))
def _freeze_write_buffer(self, size):
self._write_buffer_frozen = size
def _unfreeze_write_buffer(self):
self._write_buffer_frozen = False
self._write_buffer += b''.join(self._pending_writes_while_frozen)
self._write_buffer_size += sum(map(len, self._pending_writes_while_frozen))
self._pending_writes_while_frozen[:] = []
def _got_empty_write(self, size):
"""
Called when a non-blocking write() failed writing anything.
Can be overridden in subclasses.
"""
def _handle_write(self): def _handle_write(self):
while self._write_buffer_size: while True:
assert self._write_buffer_size >= 0 size = len(self._write_buffer)
if not size:
break
assert size > 0
try: try:
start = self._write_buffer_pos if _WINDOWS:
if self._write_buffer_frozen:
size = self._write_buffer_frozen
elif _WINDOWS:
# On windows, socket.send blows up if given a # On windows, socket.send blows up if given a
# write buffer that's too large, instead of just # write buffer that's too large, instead of just
# returning the number of bytes it was able to # returning the number of bytes it was able to
# process. Therefore we must not call socket.send # process. Therefore we must not call socket.send
# with more than 128KB at a time. # with more than 128KB at a time.
size = 128 * 1024 size = 128 * 1024
else:
size = self._write_buffer_size num_bytes = self.write_to_fd(self._write_buffer.peek(size))
num_bytes = self.write_to_fd(
memoryview(self._write_buffer)[start:start + size])
if num_bytes == 0: if num_bytes == 0:
self._got_empty_write(size)
break break
self._write_buffer_pos += num_bytes self._write_buffer.advance(num_bytes)
self._write_buffer_size -= num_bytes
# Amortized O(1) shrink
# (this heuristic is implemented natively in Python 3.4+
# but is replicated here for Python 2)
if self._write_buffer_pos > self._write_buffer_size:
del self._write_buffer[:self._write_buffer_pos]
self._write_buffer_pos = 0
if self._write_buffer_frozen:
self._unfreeze_write_buffer()
self._total_write_done_index += num_bytes self._total_write_done_index += num_bytes
except (socket.error, IOError, OSError) as e: except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK: if e.args[0] in _ERRNO_WOULDBLOCK:
self._got_empty_write(size)
break break
else: else:
if not self._is_connreset(e): if not self._is_connreset(e):
@ -886,7 +1019,7 @@ class BaseIOStream(object):
# minimize log spam # minimize log spam
gen_log.warning("Write error on %s: %s", gen_log.warning("Write error on %s: %s",
self.fileno(), e) self.fileno(), e)
self.close(exc_info=True) self.close(exc_info=e)
return return
while self._write_futures: while self._write_futures:
@ -896,7 +1029,7 @@ class BaseIOStream(object):
self._write_futures.popleft() self._write_futures.popleft()
future.set_result(None) future.set_result(None)
if not self._write_buffer_size: if not len(self._write_buffer):
if self._write_callback: if self._write_callback:
callback = self._write_callback callback = self._write_callback
self._write_callback = None self._write_callback = None
@ -1048,21 +1181,24 @@ class IOStream(BaseIOStream):
socket.SO_ERROR) socket.SO_ERROR)
return socket.error(errno, os.strerror(errno)) return socket.error(errno, os.strerror(errno))
def read_from_fd(self): def read_from_fd(self, buf):
try: try:
chunk = self.socket.recv(self.read_chunk_size) return self.socket.recv_into(buf)
except socket.error as e: except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK: if e.args[0] in _ERRNO_WOULDBLOCK:
return None return None
else: else:
raise raise
if not chunk: finally:
self.close() buf = None
return None
return chunk
def write_to_fd(self, data): def write_to_fd(self, data):
return self.socket.send(data) try:
return self.socket.send(data)
finally:
# Avoid keeping a reference to data, which can be a memoryview.
# See https://github.com/tornadoweb/tornado/pull/2008
del data
def connect(self, address, callback=None, server_hostname=None): def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking. """Connects the socket to a remote address without blocking.
@ -1108,7 +1244,7 @@ class IOStream(BaseIOStream):
self._connect_callback = stack_context.wrap(callback) self._connect_callback = stack_context.wrap(callback)
future = None future = None
else: else:
future = self._connect_future = TracebackFuture() future = self._connect_future = Future()
try: try:
self.socket.connect(address) self.socket.connect(address)
except socket.error as e: except socket.error as e:
@ -1124,7 +1260,7 @@ class IOStream(BaseIOStream):
if future is None: if future is None:
gen_log.warning("Connect error on fd %s: %s", gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e) self.socket.fileno(), e)
self.close(exc_info=True) self.close(exc_info=e)
return future return future
self._add_io_state(self.io_loop.WRITE) self._add_io_state(self.io_loop.WRITE)
return future return future
@ -1186,9 +1322,8 @@ class IOStream(BaseIOStream):
orig_close_callback = self._close_callback orig_close_callback = self._close_callback
self._close_callback = None self._close_callback = None
future = TracebackFuture() future = Future()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options, ssl_stream = SSLIOStream(socket, ssl_options=ssl_options)
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well. # Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need # If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves # to restore the original callback after our Future resolves
@ -1292,17 +1427,6 @@ class SSLIOStream(IOStream):
def writing(self): def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing() return self._handshake_writing or super(SSLIOStream, self).writing()
def _got_empty_write(self, size):
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._freeze_write_buffer(size)
def _do_ssl_handshake(self): def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib # Based on code from test_ssl.py in the python stdlib
try: try:
@ -1318,7 +1442,7 @@ class SSLIOStream(IOStream):
return return
elif err.args[0] in (ssl.SSL_ERROR_EOF, elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN): ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True) return self.close(exc_info=err)
elif err.args[0] == ssl.SSL_ERROR_SSL: elif err.args[0] == ssl.SSL_ERROR_SSL:
try: try:
peer = self.socket.getpeername() peer = self.socket.getpeername()
@ -1326,7 +1450,7 @@ class SSLIOStream(IOStream):
peer = '(not connected)' peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s", gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err) self.socket.fileno(), peer, err)
return self.close(exc_info=True) return self.close(exc_info=err)
raise raise
except socket.error as err: except socket.error as err:
# Some port scans (e.g. nmap in -sT mode) have been known # Some port scans (e.g. nmap in -sT mode) have been known
@ -1335,13 +1459,13 @@ class SSLIOStream(IOStream):
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0 # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
if (self._is_connreset(err) or if (self._is_connreset(err) or
err.args[0] in (errno.EBADF, errno.ENOTCONN)): err.args[0] in (errno.EBADF, errno.ENOTCONN)):
return self.close(exc_info=True) return self.close(exc_info=err)
raise raise
except AttributeError: except AttributeError as err:
# On Linux, if the connection was reset before the call to # On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an # wrap_socket, do_handshake will fail with an
# AttributeError. # AttributeError.
return self.close(exc_info=True) return self.close(exc_info=err)
else: else:
self._ssl_accepting = False self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()): if not self._verify_cert(self.socket.getpeercert()):
@ -1379,8 +1503,8 @@ class SSLIOStream(IOStream):
gen_log.warning("No SSL certificate given") gen_log.warning("No SSL certificate given")
return False return False
try: try:
ssl_match_hostname(peercert, self._server_hostname) ssl.match_hostname(peercert, self._server_hostname)
except SSLCertificateError as e: except ssl.CertificateError as e:
gen_log.warning("Invalid SSL certificate: %s" % e) gen_log.warning("Invalid SSL certificate: %s" % e)
return False return False
else: else:
@ -1454,7 +1578,7 @@ class SSLIOStream(IOStream):
self._ssl_connect_callback = stack_context.wrap(callback) self._ssl_connect_callback = stack_context.wrap(callback)
future = None future = None
else: else:
future = self._ssl_connect_future = TracebackFuture() future = self._ssl_connect_future = Future()
if not self._ssl_accepting: if not self._ssl_accepting:
self._run_ssl_connect_callback() self._run_ssl_connect_callback()
return future return future
@ -1472,36 +1596,34 @@ class SSLIOStream(IOStream):
# simply return 0 bytes written. # simply return 0 bytes written.
return 0 return 0
raise raise
finally:
# Avoid keeping a reference to data, which can be a memoryview.
# See https://github.com/tornadoweb/tornado/pull/2008
del data
def read_from_fd(self): def read_from_fd(self, buf):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try: try:
# SSLSocket objects have both a read() and recv() method, if self._ssl_accepting:
# while regular sockets only have recv(). # If the handshake hasn't finished yet, there can't be anything
# The recv() method blocks (at least in python 2.6) if it is # to read (attempting to read may or may not raise an exception
# called when there is nothing to read, so we have to use # depending on the SSL version)
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None return None
else: try:
raise return self.socket.recv_into(buf)
except socket.error as e: except ssl.SSLError as e:
if e.args[0] in _ERRNO_WOULDBLOCK: # SSLError is a subclass of socket.error, so this except
return None # block must come first.
else: if e.args[0] == ssl.SSL_ERROR_WANT_READ:
raise return None
if not chunk: else:
self.close() raise
return None except socket.error as e:
return chunk if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
finally:
buf = None
def _is_connreset(self, e): def _is_connreset(self, e):
if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF: if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF:
@ -1519,6 +1641,7 @@ class PipeIOStream(BaseIOStream):
""" """
def __init__(self, fd, *args, **kwargs): def __init__(self, fd, *args, **kwargs):
self.fd = fd self.fd = fd
self._fio = io.FileIO(self.fd, "r+")
_set_nonblocking(fd) _set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs) super(PipeIOStream, self).__init__(*args, **kwargs)
@ -1526,28 +1649,29 @@ class PipeIOStream(BaseIOStream):
return self.fd return self.fd
def close_fd(self): def close_fd(self):
os.close(self.fd) self._fio.close()
def write_to_fd(self, data): def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try: try:
chunk = os.read(self.fd, self.read_chunk_size) return os.write(self.fd, data)
finally:
# Avoid keeping a reference to data, which can be a memoryview.
# See https://github.com/tornadoweb/tornado/pull/2008
del data
def read_from_fd(self, buf):
try:
return self._fio.readinto(buf)
except (IOError, OSError) as e: except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK: if errno_from_exception(e) == errno.EBADF:
return None
elif errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will # If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF. # report it as readable but reads will fail with EBADF.
self.close(exc_info=True) self.close(exc_info=e)
return None return None
else: else:
raise raise
if not chunk: finally:
self.close() buf = None
return None
return chunk
def doctests(): def doctests():


@ -1,5 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
#
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may

View file

@ -15,9 +15,10 @@
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
import collections import collections
from concurrent.futures import CancelledError
from tornado import gen, ioloop from tornado import gen, ioloop
from tornado.concurrent import Future from tornado.concurrent import Future, future_set_result_unless_cancelled
__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock'] __all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock']
@ -99,8 +100,12 @@ class Condition(_TimeoutGarbageCollector):
# Wait up to 1 second. # Wait up to 1 second.
yield condition.wait(timeout=datetime.timedelta(seconds=1)) yield condition.wait(timeout=datetime.timedelta(seconds=1))
The method raises `tornado.gen.TimeoutError` if there's no notification The method returns False if there's no notification before the deadline.
before the deadline.
.. versionchanged:: 5.0
Previously, waiters could be notified synchronously from within
`notify`. Now, the notification will always be received on the
next iteration of the `.IOLoop`.
""" """
def __init__(self): def __init__(self):
@ -123,7 +128,8 @@ class Condition(_TimeoutGarbageCollector):
self._waiters.append(waiter) self._waiters.append(waiter)
if timeout: if timeout:
def on_timeout(): def on_timeout():
waiter.set_result(False) if not waiter.done():
future_set_result_unless_cancelled(waiter, False)
self._garbage_collect() self._garbage_collect()
io_loop = ioloop.IOLoop.current() io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout) timeout_handle = io_loop.add_timeout(timeout, on_timeout)
@ -141,7 +147,7 @@ class Condition(_TimeoutGarbageCollector):
waiters.append(waiter) waiters.append(waiter)
for waiter in waiters: for waiter in waiters:
waiter.set_result(True) future_set_result_unless_cancelled(waiter, True)
def notify_all(self): def notify_all(self):
"""Wake all waiters.""" """Wake all waiters."""
@ -191,7 +197,8 @@ class Event(object):
Done Done
""" """
def __init__(self): def __init__(self):
self._future = Future() self._value = False
self._waiters = set()
def __repr__(self): def __repr__(self):
return '<%s %s>' % ( return '<%s %s>' % (
@ -199,34 +206,48 @@ class Event(object):
def is_set(self): def is_set(self):
"""Return ``True`` if the internal flag is true.""" """Return ``True`` if the internal flag is true."""
return self._future.done() return self._value
def set(self): def set(self):
"""Set the internal flag to ``True``. All waiters are awakened. """Set the internal flag to ``True``. All waiters are awakened.
Calling `.wait` once the flag is set will not block. Calling `.wait` once the flag is set will not block.
""" """
if not self._future.done(): if not self._value:
self._future.set_result(None) self._value = True
for fut in self._waiters:
if not fut.done():
fut.set_result(None)
def clear(self): def clear(self):
"""Reset the internal flag to ``False``. """Reset the internal flag to ``False``.
Calls to `.wait` will block until `.set` is called. Calls to `.wait` will block until `.set` is called.
""" """
if self._future.done(): self._value = False
self._future = Future()
def wait(self, timeout=None): def wait(self, timeout=None):
"""Block until the internal flag is true. """Block until the internal flag is true.
Returns a Future, which raises `tornado.gen.TimeoutError` after a Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout. timeout.
""" """
fut = Future()
if self._value:
fut.set_result(None)
return fut
self._waiters.add(fut)
fut.add_done_callback(lambda fut: self._waiters.remove(fut))
if timeout is None: if timeout is None:
return self._future return fut
else: else:
return gen.with_timeout(timeout, self._future) timeout_fut = gen.with_timeout(timeout, fut, quiet_exceptions=(CancelledError,))
# This is a slightly clumsy workaround for the fact that
# gen.with_timeout doesn't cancel its futures. Cancelling
# fut will remove it from the waiters list.
timeout_fut.add_done_callback(lambda tf: fut.cancel() if not fut.done() else None)
return timeout_fut
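# Sketch of Event.wait with a timeout under the new implementation: on
# timeout the pending waiter future is cancelled and dropped from the
# waiter set. The one-second deadline is an arbitrary example.
import datetime
from tornado import gen, ioloop, locks

event = locks.Event()

@gen.coroutine
def waiter():
    try:
        yield event.wait(timeout=datetime.timedelta(seconds=1))
        print("event was set")
    except gen.TimeoutError:
        print("timed out waiting for the event")

ioloop.IOLoop.current().run_sync(waiter)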
class _ReleasingContextManager(object): class _ReleasingContextManager(object):
@ -272,6 +293,8 @@ class Semaphore(_TimeoutGarbageCollector):
@gen.coroutine @gen.coroutine
def simulator(futures): def simulator(futures):
for f in futures: for f in futures:
# simulate the asynchronous passage of time
yield gen.moment
yield gen.moment yield gen.moment
f.set_result(None) f.set_result(None)
@ -388,7 +411,8 @@ class Semaphore(_TimeoutGarbageCollector):
self._waiters.append(waiter) self._waiters.append(waiter)
if timeout: if timeout:
def on_timeout(): def on_timeout():
waiter.set_exception(gen.TimeoutError()) if not waiter.done():
waiter.set_exception(gen.TimeoutError())
self._garbage_collect() self._garbage_collect()
io_loop = ioloop.IOLoop.current() io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout) timeout_handle = io_loop.add_timeout(timeout, on_timeout)
@ -458,7 +482,7 @@ class Lock(object):
``async with`` includes both the ``yield`` and the ``acquire`` ``async with`` includes both the ``yield`` and the ``acquire``
(just as it does with `threading.Lock`): (just as it does with `threading.Lock`):
>>> async def f(): # doctest: +SKIP >>> async def f2(): # doctest: +SKIP
... async with lock: ... async with lock:
... # Do something holding the lock. ... # Do something holding the lock.
... pass ... pass
@ -480,7 +504,7 @@ class Lock(object):
def acquire(self, timeout=None): def acquire(self, timeout=None):
"""Attempt to lock. Returns a Future. """Attempt to lock. Returns a Future.
Returns a Future, which raises `tornado.gen.TimeoutError` after a Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout. timeout.
""" """
return self._block.acquire(timeout) return self._block.acquire(timeout)


@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2012 Facebook # Copyright 2012 Facebook
# #
@ -102,7 +101,8 @@ class LogFormatter(logging.Formatter):
Added support for ``colorama``. Changed the constructor Added support for ``colorama``. Changed the constructor
signature to be compatible with `logging.config.dictConfig`. signature to be compatible with `logging.config.dictConfig`.
""" """
DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s' DEFAULT_FORMAT = \
'%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S' DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
DEFAULT_COLORS = { DEFAULT_COLORS = {
logging.DEBUG: 4, # Blue logging.DEBUG: 4, # Blue
@ -115,13 +115,13 @@ class LogFormatter(logging.Formatter):
style='%', color=True, colors=DEFAULT_COLORS): style='%', color=True, colors=DEFAULT_COLORS):
r""" r"""
:arg bool color: Enables color support. :arg bool color: Enables color support.
:arg string fmt: Log message format. :arg str fmt: Log message format.
It will be applied to the attributes dict of log records. The It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on. depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color :arg dict colors: color mappings from logging level to terminal color
code code
:arg string datefmt: Datetime format. :arg str datefmt: Datetime format.
Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
.. versionchanged:: 3.2 .. versionchanged:: 3.2
@ -177,7 +177,7 @@ class LogFormatter(logging.Formatter):
# bytestrings. This is a bit of a hacky place to do this, but # bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise # it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded # result are so useless (and tornado is fond of using utf8-encoded
# byte strings whereever possible). # byte strings wherever possible).
record.message = _safe_unicode(message) record.message = _safe_unicode(message)
except Exception as e: except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__) record.message = "Bad message (%r): %r" % (e, record.__dict__)
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2011 Facebook # Copyright 2011 Facebook
# #
@ -25,6 +24,7 @@ import socket
import stat import stat
from tornado.concurrent import dummy_executor, run_on_executor from tornado.concurrent import dummy_executor, run_on_executor
from tornado import gen
from tornado.ioloop import IOLoop from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec from tornado.platform.auto import set_close_exec
from tornado.util import PY3, Configurable, errno_from_exception from tornado.util import PY3, Configurable, errno_from_exception
@ -35,54 +35,20 @@ except ImportError:
# ssl is not available on Google App Engine # ssl is not available on Google App Engine
ssl = None ssl = None
try:
import certifi
except ImportError:
# certifi is optional as long as we have ssl.create_default_context.
if ssl is None or hasattr(ssl, 'create_default_context'):
certifi = None
else:
raise
if PY3: if PY3:
xrange = range xrange = range
if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+ if ssl is not None:
ssl_match_hostname = ssl.match_hostname # Note that the naming of ssl.Purpose is confusing; the purpose
SSLCertificateError = ssl.CertificateError # of a context is to authenticate the opposite side of the connection.
elif ssl is None: _client_ssl_defaults = ssl.create_default_context(
ssl_match_hostname = SSLCertificateError = None # type: ignore ssl.Purpose.SERVER_AUTH)
else: _server_ssl_defaults = ssl.create_default_context(
import backports.ssl_match_hostname ssl.Purpose.CLIENT_AUTH)
ssl_match_hostname = backports.ssl_match_hostname.match_hostname if hasattr(ssl, 'OP_NO_COMPRESSION'):
SSLCertificateError = backports.ssl_match_hostname.CertificateError # type: ignore # See netutil.ssl_options_to_context
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
if hasattr(ssl, 'SSLContext'): _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
if hasattr(ssl, 'create_default_context'):
# Python 2.7.9+, 3.4+
# Note that the naming of ssl.Purpose is confusing; the purpose
# of a context is to authenticate the opposite side of the connection.
_client_ssl_defaults = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH)
_server_ssl_defaults = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH)
else:
# Python 3.2-3.3
_client_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
_client_ssl_defaults.verify_mode = ssl.CERT_REQUIRED
_client_ssl_defaults.load_verify_locations(certifi.where())
_server_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# Disable TLS compression to avoid CRIME and related attacks.
# This constant wasn't added until python 3.3.
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
_server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
elif ssl:
# Python 2.6-2.7.8
_client_ssl_defaults = dict(cert_reqs=ssl.CERT_REQUIRED,
ca_certs=certifi.where())
_server_ssl_defaults = {}
else: else:
# Google App Engine # Google App Engine
_client_ssl_defaults = dict(cert_reqs=None, _client_ssl_defaults = dict(cert_reqs=None,
@ -232,7 +198,7 @@ if hasattr(socket, 'AF_UNIX'):
return sock return sock
def add_accept_handler(sock, callback, io_loop=None): def add_accept_handler(sock, callback):
"""Adds an `.IOLoop` event handler to accept new connections on ``sock``. """Adds an `.IOLoop` event handler to accept new connections on ``sock``.
When a connection is accepted, ``callback(connection, address)`` will When a connection is accepted, ``callback(connection, address)`` will
@ -241,11 +207,17 @@ def add_accept_handler(sock, callback, io_loop=None):
is different from the ``callback(fd, events)`` signature used for is different from the ``callback(fd, events)`` signature used for
`.IOLoop` handlers. `.IOLoop` handlers.
.. versionchanged:: 4.1 A callable is returned which, when called, will remove the `.IOLoop`
The ``io_loop`` argument is deprecated. event handler and stop processing further incoming connections.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.0
A callable is returned (``None`` was returned before).
""" """
if io_loop is None: io_loop = IOLoop.current()
io_loop = IOLoop.current() removed = [False]
def accept_handler(fd, events): def accept_handler(fd, events):
# More connections may come in while we're handling callbacks; # More connections may come in while we're handling callbacks;
@ -260,6 +232,9 @@ def add_accept_handler(sock, callback, io_loop=None):
# heuristic for the number of connections we can reasonably # heuristic for the number of connections we can reasonably
# accept at once. # accept at once.
for i in xrange(_DEFAULT_BACKLOG): for i in xrange(_DEFAULT_BACKLOG):
if removed[0]:
# The socket was probably closed
return
try: try:
connection, address = sock.accept() connection, address = sock.accept()
except socket.error as e: except socket.error as e:
@ -273,8 +248,15 @@ def add_accept_handler(sock, callback, io_loop=None):
if errno_from_exception(e) == errno.ECONNABORTED: if errno_from_exception(e) == errno.ECONNABORTED:
continue continue
raise raise
set_close_exec(connection.fileno())
callback(connection, address) callback(connection, address)
def remove_handler():
io_loop.remove_handler(sock)
removed[0] = True
io_loop.add_handler(sock, accept_handler, IOLoop.READ) io_loop.add_handler(sock, accept_handler, IOLoop.READ)
return remove_handler
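A short usage sketch of the new return value (port choice and shutdown timing are illustrative): the returned callable unregisters the accept handler, after which the listening sockets can be closed safely::

    from tornado import ioloop, netutil

    sockets = netutil.bind_sockets(0)  # port 0: pick any free port

    def on_connection(connection, address):
        print("connection from", address)
        connection.close()

    removers = [netutil.add_accept_handler(sock, on_connection)
                for sock in sockets]

    def shutdown():
        # New in 5.0: undo add_accept_handler before closing sockets.
        for remove in removers:
            remove()
        for sock in sockets:
            sock.close()
        ioloop.IOLoop.current().stop()

    io_loop = ioloop.IOLoop.current()
    io_loop.call_later(60, shutdown)
    io_loop.start()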
def is_valid_ip(ip): def is_valid_ip(ip):
@ -310,11 +292,16 @@ class Resolver(Configurable):
The implementations of this interface included with Tornado are The implementations of this interface included with Tornado are
* `tornado.netutil.BlockingResolver` * `tornado.netutil.DefaultExecutorResolver`
* `tornado.netutil.ThreadedResolver` * `tornado.netutil.BlockingResolver` (deprecated)
* `tornado.netutil.ThreadedResolver` (deprecated)
* `tornado.netutil.OverrideResolver` * `tornado.netutil.OverrideResolver`
* `tornado.platform.twisted.TwistedResolver` * `tornado.platform.twisted.TwistedResolver`
* `tornado.platform.caresresolver.CaresResolver` * `tornado.platform.caresresolver.CaresResolver`
.. versionchanged:: 5.0
The default implementation has changed from `BlockingResolver` to
`DefaultExecutorResolver`.
""" """
@classmethod @classmethod
def configurable_base(cls): def configurable_base(cls):
@ -322,7 +309,7 @@ class Resolver(Configurable):
@classmethod @classmethod
def configurable_default(cls): def configurable_default(cls):
return BlockingResolver return DefaultExecutorResolver
def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None): def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
"""Resolves an address. """Resolves an address.
@ -341,6 +328,10 @@ class Resolver(Configurable):
.. versionchanged:: 4.4 .. versionchanged:: 4.4
Standardized all implementations to raise `IOError`. Standardized all implementations to raise `IOError`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" """
raise NotImplementedError() raise NotImplementedError()
@ -353,6 +344,31 @@ class Resolver(Configurable):
pass pass
def _resolve_addr(host, port, family=socket.AF_UNSPEC):
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for family, socktype, proto, canonname, address in addrinfo:
results.append((family, address))
return results
class DefaultExecutorResolver(Resolver):
"""Resolver implementation using `.IOLoop.run_in_executor`.
.. versionadded:: 5.0
"""
@gen.coroutine
def resolve(self, host, port, family=socket.AF_UNSPEC):
result = yield IOLoop.current().run_in_executor(
None, _resolve_addr, host, port, family)
raise gen.Return(result)
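A minimal usage sketch; as of 5.0 a plain ``Resolver()`` resolves through the IOLoop's default executor, so no configuration is required::

    import socket

    from tornado import gen, ioloop
    from tornado.netutil import Resolver

    @gen.coroutine
    def lookup():
        resolver = Resolver()  # DefaultExecutorResolver as of 5.0
        addrs = yield resolver.resolve("localhost", 80, socket.AF_UNSPEC)
        # addrs is a list of (family, address) pairs.
        print(addrs)

    ioloop.IOLoop.current().run_sync(lookup)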
class ExecutorResolver(Resolver): class ExecutorResolver(Resolver):
"""Resolver implementation using a `concurrent.futures.Executor`. """Resolver implementation using a `concurrent.futures.Executor`.
@ -363,11 +379,15 @@ class ExecutorResolver(Resolver):
``close_resolver=False``; use this if you want to reuse the same ``close_resolver=False``; use this if you want to reuse the same
executor elsewhere. executor elsewhere.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
""" """
def initialize(self, io_loop=None, executor=None, close_executor=True): def initialize(self, executor=None, close_executor=True):
self.io_loop = io_loop or IOLoop.current() self.io_loop = IOLoop.current()
if executor is not None: if executor is not None:
self.executor = executor self.executor = executor
self.close_executor = close_executor self.close_executor = close_executor
@ -382,16 +402,7 @@ class ExecutorResolver(Resolver):
@run_on_executor @run_on_executor
def resolve(self, host, port, family=socket.AF_UNSPEC): def resolve(self, host, port, family=socket.AF_UNSPEC):
# On Solaris, getaddrinfo fails if the given port is not found return _resolve_addr(host, port, family)
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for family, socktype, proto, canonname, address in addrinfo:
results.append((family, address))
return results
class BlockingResolver(ExecutorResolver): class BlockingResolver(ExecutorResolver):
@ -399,9 +410,13 @@ class BlockingResolver(ExecutorResolver):
The `.IOLoop` will be blocked during the resolution, although the The `.IOLoop` will be blocked during the resolution, although the
callback will not be run until the next `.IOLoop` iteration. callback will not be run until the next `.IOLoop` iteration.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
""" """
def initialize(self, io_loop=None): def initialize(self):
super(BlockingResolver, self).initialize(io_loop=io_loop) super(BlockingResolver, self).initialize()
class ThreadedResolver(ExecutorResolver): class ThreadedResolver(ExecutorResolver):
@ -419,14 +434,18 @@ class ThreadedResolver(ExecutorResolver):
.. versionchanged:: 3.1 .. versionchanged:: 3.1
All ``ThreadedResolvers`` share a single thread pool, whose All ``ThreadedResolvers`` share a single thread pool, whose
size is set by the first one to be created. size is set by the first one to be created.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
""" """
_threadpool = None # type: ignore _threadpool = None # type: ignore
_threadpool_pid = None # type: int _threadpool_pid = None # type: int
def initialize(self, io_loop=None, num_threads=10): def initialize(self, num_threads=10):
threadpool = ThreadedResolver._create_threadpool(num_threads) threadpool = ThreadedResolver._create_threadpool(num_threads)
super(ThreadedResolver, self).initialize( super(ThreadedResolver, self).initialize(
io_loop=io_loop, executor=threadpool, close_executor=False) executor=threadpool, close_executor=False)
@classmethod @classmethod
def _create_threadpool(cls, num_threads): def _create_threadpool(cls, num_threads):
@ -448,7 +467,21 @@ class OverrideResolver(Resolver):
This can be used to make local DNS changes (e.g. for testing) This can be used to make local DNS changes (e.g. for testing)
without modifying system-wide settings. without modifying system-wide settings.
The mapping can contain either host strings or host-port pairs. The mapping can be in three formats::
{
# Hostname to host or ip
"example.com": "127.0.1.1",
# Host+port to host+port
("login.example.com", 443): ("localhost", 1443),
# Host+port+address family to host+port
("login.example.com", 443, socket.AF_INET6): ("::1", 1443),
}
.. versionchanged:: 5.0
Added support for host-port-family triplets.
""" """
def initialize(self, resolver, mapping): def initialize(self, resolver, mapping):
self.resolver = resolver self.resolver = resolver
@ -457,12 +490,14 @@ class OverrideResolver(Resolver):
def close(self): def close(self):
self.resolver.close() self.resolver.close()
def resolve(self, host, port, *args, **kwargs): def resolve(self, host, port, family=socket.AF_UNSPEC, *args, **kwargs):
if (host, port) in self.mapping: if (host, port, family) in self.mapping:
host, port = self.mapping[(host, port, family)]
elif (host, port) in self.mapping:
host, port = self.mapping[(host, port)] host, port = self.mapping[(host, port)]
elif host in self.mapping: elif host in self.mapping:
host = self.mapping[host] host = self.mapping[host]
return self.resolver.resolve(host, port, *args, **kwargs) return self.resolver.resolve(host, port, family, *args, **kwargs)
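For example, a sketch reusing the placeholder hostnames from the mapping above: one host is redirected while everything else passes through to the wrapped resolver::

    import socket

    from tornado import gen, ioloop
    from tornado.netutil import OverrideResolver, Resolver

    mapping = {
        "example.com": "127.0.0.1",
        ("login.example.com", 443): ("localhost", 1443),
    }
    resolver = OverrideResolver(resolver=Resolver(), mapping=mapping)

    @gen.coroutine
    def lookup():
        # Matches the host-only entry, so resolves 127.0.0.1 instead.
        addrs = yield resolver.resolve("example.com", 80, socket.AF_INET)
        print(addrs)

    ioloop.IOLoop.current().run_sync(lookup)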
# These are the keyword arguments to ssl.wrap_socket that must be translated # These are the keyword arguments to ssl.wrap_socket that must be translated
@ -483,11 +518,12 @@ def ssl_options_to_context(ssl_options):
accepts both forms needs to upgrade to the `~ssl.SSLContext` version accepts both forms needs to upgrade to the `~ssl.SSLContext` version
to use features like SNI or NPN. to use features like SNI or NPN.
""" """
if isinstance(ssl_options, dict): if isinstance(ssl_options, ssl.SSLContext):
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
if (not hasattr(ssl, 'SSLContext') or
isinstance(ssl_options, ssl.SSLContext)):
return ssl_options return ssl_options
assert isinstance(ssl_options, dict)
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
# Can't use create_default_context since this interface doesn't
# tell us client vs server.
context = ssl.SSLContext( context = ssl.SSLContext(
ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23)) ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
if 'certfile' in ssl_options: if 'certfile' in ssl_options:
@ -500,7 +536,9 @@ def ssl_options_to_context(ssl_options):
context.set_ciphers(ssl_options['ciphers']) context.set_ciphers(ssl_options['ciphers'])
if hasattr(ssl, 'OP_NO_COMPRESSION'): if hasattr(ssl, 'OP_NO_COMPRESSION'):
# Disable TLS compression to avoid CRIME and related attacks. # Disable TLS compression to avoid CRIME and related attacks.
# This constant wasn't added until python 3.3. # This constant depends on openssl version 1.0.
# TODO: Do we need to do this ourselves or can we trust
# the defaults?
context.options |= ssl.OP_NO_COMPRESSION context.options |= ssl.OP_NO_COMPRESSION
return context return context
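A brief sketch of the dict-to-context path; ``certfile``/``keyfile`` entries (real file paths) would be passed the same way::

    import ssl

    from tornado.netutil import ssl_options_to_context

    ssl_options = {
        "cert_reqs": ssl.CERT_NONE,
        "ciphers": "DEFAULT",
    }
    context = ssl_options_to_context(ssl_options)
    assert isinstance(context, ssl.SSLContext)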
@ -515,14 +553,13 @@ def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
appropriate). appropriate).
""" """
context = ssl_options_to_context(ssl_options) context = ssl_options_to_context(ssl_options)
if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext): if ssl.HAS_SNI:
if server_hostname is not None and getattr(ssl, 'HAS_SNI'): # In python 3.4, wrap_socket only accepts the server_hostname
# Python doesn't have server-side SNI support so we can't # argument if HAS_SNI is true.
# really unittest this, but it can be manually tested with # TODO: add a unittest (python added server-side SNI support in 3.4)
# python3.2 -m tornado.httpclient https://sni.velox.ch # In the meantime it can be manually tested with
return context.wrap_socket(socket, server_hostname=server_hostname, # python3 -m tornado.httpclient https://sni.velox.ch
**kwargs) return context.wrap_socket(socket, server_hostname=server_hostname,
else: **kwargs)
return context.wrap_socket(socket, **kwargs)
else: else:
return ssl.wrap_socket(socket, **dict(context, **kwargs)) # type: ignore return context.wrap_socket(socket, **kwargs)
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #
@ -16,9 +15,19 @@
"""A command line parsing module that lets modules define their own options. """A command line parsing module that lets modules define their own options.
Each module defines its own options which are added to the global This module is inspired by Google's `gflags
option namespace, e.g.:: <https://github.com/google/python-gflags>`_. The primary difference
with libraries such as `argparse` is that a global registry is used so
that options may be defined in any module (it also enables
`tornado.log` by default). The rest of Tornado does not depend on this
module, so feel free to use `argparse` or other configuration
libraries if you prefer them.
Options must be defined with `tornado.options.define` before use,
generally at the top level of a module. The options are then
accessible as attributes of `tornado.options.options`::
# myapp/db.py
from tornado.options import define, options from tornado.options import define, options
define("mysql_host", default="127.0.0.1:3306", help="Main user DB") define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
@ -29,34 +38,36 @@ option namespace, e.g.::
db = database.Connection(options.mysql_host) db = database.Connection(options.mysql_host)
... ...
# myapp/server.py
from tornado.options import define, options
define("port", default=8080, help="port to listen on")
def start_server():
app = make_app()
app.listen(options.port)
The ``main()`` method of your application does not need to be aware of all of The ``main()`` method of your application does not need to be aware of all of
the options used throughout your program; they are all automatically loaded the options used throughout your program; they are all automatically loaded
when the modules are loaded. However, all modules that define options when the modules are loaded. However, all modules that define options
must have been imported before the command line is parsed. must have been imported before the command line is parsed.
Your ``main()`` method can parse the command line or parse a config file with Your ``main()`` method can parse the command line or parse a config file with
either:: either `parse_command_line` or `parse_config_file`::
tornado.options.parse_command_line() import myapp.db, myapp.server
# or import tornado.options
tornado.options.parse_config_file("/etc/server.conf")
.. note: if __name__ == '__main__':
tornado.options.parse_command_line()
# or
tornado.options.parse_config_file("/etc/server.conf")
When using tornado.options.parse_command_line or .. note::
tornado.options.parse_config_file, the only options that are set are
ones that were previously defined with tornado.options.define.
Command line formats are what you would expect (``--myoption=myvalue``). When using multiple ``parse_*`` functions, pass ``final=False`` to all
Config files are just Python files. Global names become options, e.g.:: but the last one, or side effects may occur twice (in particular,
this can result in log messages being doubled).
myoption = "myvalue"
myotheroption = "myothervalue"
We support `datetimes <datetime.datetime>`, `timedeltas
<datetime.timedelta>`, ints, and floats (just pass a ``type`` kwarg to
`define`). We also accept multi-value options. See the documentation for
`define()` below.
`tornado.options.options` is a singleton instance of `OptionParser`, and `tornado.options.options` is a singleton instance of `OptionParser`, and
the top-level functions in this module (`define`, `parse_command_line`, etc) the top-level functions in this module (`define`, `parse_command_line`, etc)
@ -80,6 +91,7 @@ instances to define isolated sets of options, such as for subcommands.
options can be defined, set, and read with any mix of the two. options can be defined, set, and read with any mix of the two.
Dashes are typical for command-line usage while config files require Dashes are typical for command-line usage while config files require
underscores. underscores.
""" """
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
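A short sketch of the ``final=False`` pattern from the note above, reusing the config path from the example (the file must exist, of course)::

    import tornado.options

    tornado.options.define("port", default=8080, help="port to listen on")

    if __name__ == "__main__":
        # Only the last parse_* call runs callbacks and sets up logging.
        tornado.options.parse_config_file("/etc/server.conf", final=False)
        tornado.options.parse_command_line()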
@ -190,13 +202,13 @@ class OptionParser(object):
multiple=False, group=None, callback=None): multiple=False, group=None, callback=None):
"""Defines a new command line option. """Defines a new command line option.
If ``type`` is given (one of str, float, int, datetime, or timedelta) ``type`` can be any of `str`, `int`, `float`, `bool`,
or can be inferred from the ``default``, we parse the command line `~datetime.datetime`, or `~datetime.timedelta`. If no ``type``
arguments based on the given type. If ``multiple`` is True, we accept is given but a ``default`` is, ``type`` is the type of
comma-separated values, and the option value is always a list. ``default``. Otherwise, ``type`` defaults to `str`.
For multi-value integers, we also accept the syntax ``x:y``, which If ``multiple`` is True, the option value is a list of ``type``
turns into ``range(x, y)`` - very useful for long integer ranges. instead of an instance of ``type``.
``help`` and ``metavar`` are used to construct the ``help`` and ``metavar`` are used to construct the
automatically generated command line help string. The help automatically generated command line help string. The help
@ -208,9 +220,7 @@ class OptionParser(object):
groups. By default, command line options are grouped by the groups. By default, command line options are grouped by the
file in which they are defined. file in which they are defined.
Command line option names must be unique globally. They can be parsed Command line option names must be unique globally.
from the command line with `parse_command_line` or parsed from a
config file with `parse_config_file`.
If a ``callback`` is given, it will be run with the new value whenever If a ``callback`` is given, it will be run with the new value whenever
the option is changed. This can be used to combine command-line the option is changed. This can be used to combine command-line
@ -222,10 +232,12 @@ class OptionParser(object):
With this definition, options in the file specified by ``--config`` will With this definition, options in the file specified by ``--config`` will
override options set earlier on the command line, but can be overridden override options set earlier on the command line, but can be overridden
by later flags. by later flags.
""" """
if name in self._options: normalized = self._normalize_name(name)
if normalized in self._options:
raise Error("Option %r already defined in %s" % raise Error("Option %r already defined in %s" %
(name, self._options[name].file_name)) (normalized, self._options[normalized].file_name))
frame = sys._getframe(0) frame = sys._getframe(0)
options_file = frame.f_code.co_filename options_file = frame.f_code.co_filename
@ -247,7 +259,6 @@ class OptionParser(object):
group_name = group group_name = group
else: else:
group_name = file_name group_name = file_name
normalized = self._normalize_name(name)
option = _Option(name, file_name=file_name, option = _Option(name, file_name=file_name,
default=default, type=type, help=help, default=default, type=type, help=help,
metavar=metavar, multiple=multiple, metavar=metavar, multiple=multiple,
@ -259,6 +270,14 @@ class OptionParser(object):
"""Parses all options given on the command line (defaults to """Parses all options given on the command line (defaults to
`sys.argv`). `sys.argv`).
Options look like ``--option=value`` and are parsed according
to their ``type``. For boolean options, ``--option`` is
equivalent to ``--option=true``.
If the option has ``multiple=True``, comma-separated values
are accepted. For multi-value integer options, the syntax
``x:y`` is also accepted and equivalent to ``range(x, y)``.
Note that ``args[0]`` is ignored since it is the program name Note that ``args[0]`` is ignored since it is the program name
in `sys.argv`. in `sys.argv`.
@ -267,6 +286,7 @@ class OptionParser(object):
If ``final`` is ``False``, parse callbacks will not be run. If ``final`` is ``False``, parse callbacks will not be run.
This is useful for applications that wish to combine configurations This is useful for applications that wish to combine configurations
from multiple sources. from multiple sources.
""" """
if args is None: if args is None:
args = sys.argv args = sys.argv
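For instance, a sketch of a multi-value integer option as described above (argument list is illustrative)::

    from tornado.options import define, options, parse_command_line

    define("ports", type=int, multiple=True, help="ports to listen on")

    # args[0] is ignored; a range such as --ports=8000:8010 also works.
    parse_command_line(["prog", "--ports=8000,8001,8002"])
    print(options.ports)  # [8000, 8001, 8002]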
@ -299,12 +319,37 @@ class OptionParser(object):
return remaining return remaining
def parse_config_file(self, path, final=True): def parse_config_file(self, path, final=True):
"""Parses and loads the Python config file at the given path. """Parses and loads the config file at the given path.
The config file contains Python code that will be executed (so
it is **not safe** to use untrusted config files). Anything in
the global namespace that matches a defined option will be
used to set that option's value.
Options are not parsed from strings as they would be on the
command line; they should be set to the correct type (this
means if you have ``datetime`` or ``timedelta`` options you
will need to import those modules in the config file).
Example (using the options defined in the top-level docs of
this module)::
port = 80
mysql_host = 'mydb.example.com:3306'
memcache_hosts = ['cache1.example.com:11011',
'cache2.example.com:11011']
If ``final`` is ``False``, parse callbacks will not be run. If ``final`` is ``False``, parse callbacks will not be run.
This is useful for applications that wish to combine configurations This is useful for applications that wish to combine configurations
from multiple sources. from multiple sources.
.. note::
`tornado.options` is primarily a command-line library.
Config file support is provided for applications that wish
to use it, but applications that prefer config files may
wish to look at other libraries instead.
.. versionchanged:: 4.1 .. versionchanged:: 4.1
Config files are now always interpreted as utf-8 instead of Config files are now always interpreted as utf-8 instead of
the system default encoding. the system default encoding.
@ -312,6 +357,7 @@ class OptionParser(object):
.. versionchanged:: 4.4 .. versionchanged:: 4.4
The special variable ``__file__`` is available inside config The special variable ``__file__`` is available inside config
files, specifying the absolute path to the config file itself. files, specifying the absolute path to the config file itself.
""" """
config = {'__file__': os.path.abspath(path)} config = {'__file__': os.path.abspath(path)}
with open(path, 'rb') as f: with open(path, 'rb') as f:
View file
@ -3,14 +3,14 @@
.. versionadded:: 3.2 .. versionadded:: 3.2
This module integrates Tornado with the ``asyncio`` module introduced This module integrates Tornado with the ``asyncio`` module introduced
in Python 3.4 (and available `as a separate download in Python 3.4. This makes it possible to combine the two libraries on
<https://pypi.python.org/pypi/asyncio>`_ for Python 3.3). This makes the same event loop.
it possible to combine the two libraries on the same event loop.
Most applications should use `AsyncIOMainLoop` to run Tornado on the .. deprecated:: 5.0
default ``asyncio`` event loop. Applications that need to run event
loops on multiple threads may use `AsyncIOLoop` to create multiple While the code in this module is still used, it is now enabled
loops. automatically when `asyncio` is available, so applications should
no longer need to refer to this module directly.
.. note:: .. note::
@ -22,35 +22,38 @@ loops.
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
import functools import functools
import tornado.concurrent
from tornado.gen import convert_yielded from tornado.gen import convert_yielded
from tornado.ioloop import IOLoop from tornado.ioloop import IOLoop
from tornado import stack_context from tornado import stack_context
try: import asyncio
# Import the real asyncio module for py33+ first. Older versions of the
# trollius backport also use this name.
import asyncio # type: ignore
except ImportError as e:
# Asyncio itself isn't available; see if trollius is (backport to py26+).
try:
import trollius as asyncio # type: ignore
except ImportError:
# Re-raise the original asyncio error, not the trollius one.
raise e
class BaseAsyncIOLoop(IOLoop): class BaseAsyncIOLoop(IOLoop):
def initialize(self, asyncio_loop, close_loop=False, **kwargs): def initialize(self, asyncio_loop, **kwargs):
super(BaseAsyncIOLoop, self).initialize(**kwargs)
self.asyncio_loop = asyncio_loop self.asyncio_loop = asyncio_loop
self.close_loop = close_loop
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {} self.handlers = {}
# Set of fds listening for reads/writes # Set of fds listening for reads/writes
self.readers = set() self.readers = set()
self.writers = set() self.writers = set()
self.closing = False self.closing = False
# If an asyncio loop was closed through an asyncio interface
# instead of IOLoop.close(), we'd never hear about it and may
# have left a dangling reference in our map. In case an
# application (or, more likely, a test suite) creates and
# destroys a lot of event loops in this way, check here to
# ensure that we don't have a lot of dead loops building up in
# the map.
#
# TODO(bdarnell): consider making self.asyncio_loop a weakref
# for AsyncIOMainLoop and make _ioloop_for_asyncio a
# WeakKeyDictionary.
for loop in list(IOLoop._ioloop_for_asyncio):
if loop.is_closed():
del IOLoop._ioloop_for_asyncio[loop]
IOLoop._ioloop_for_asyncio[asyncio_loop] = self
super(BaseAsyncIOLoop, self).initialize(**kwargs)
def close(self, all_fds=False): def close(self, all_fds=False):
self.closing = True self.closing = True
@ -59,8 +62,8 @@ class BaseAsyncIOLoop(IOLoop):
self.remove_handler(fd) self.remove_handler(fd)
if all_fds: if all_fds:
self.close_fd(fileobj) self.close_fd(fileobj)
if self.close_loop: self.asyncio_loop.close()
self.asyncio_loop.close() del IOLoop._ioloop_for_asyncio[self.asyncio_loop]
def add_handler(self, fd, handler, events): def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd) fd, fileobj = self.split_fd(fd)
@ -114,16 +117,16 @@ class BaseAsyncIOLoop(IOLoop):
handler_func(fileobj, events) handler_func(fileobj, events)
def start(self): def start(self):
old_current = IOLoop.current(instance=False) try:
old_loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
old_loop = None
try: try:
self._setup_logging() self._setup_logging()
self.make_current() asyncio.set_event_loop(self.asyncio_loop)
self.asyncio_loop.run_forever() self.asyncio_loop.run_forever()
finally: finally:
if old_current is None: asyncio.set_event_loop(old_loop)
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self): def stop(self):
self.asyncio_loop.stop() self.asyncio_loop.stop()
@ -140,67 +143,110 @@ class BaseAsyncIOLoop(IOLoop):
timeout.cancel() timeout.cancel()
def add_callback(self, callback, *args, **kwargs): def add_callback(self, callback, *args, **kwargs):
if self.closing: try:
# TODO: this is racy; we need a lock to ensure that the self.asyncio_loop.call_soon_threadsafe(
# loop isn't closed during call_soon_threadsafe. self._run_callback,
raise RuntimeError("IOLoop is closing") functools.partial(stack_context.wrap(callback), *args, **kwargs))
self.asyncio_loop.call_soon_threadsafe( except RuntimeError:
self._run_callback, # "Event loop is closed". Swallow the exception for
functools.partial(stack_context.wrap(callback), *args, **kwargs)) # consistency with PollIOLoop (and logical consistency
# with the fact that we can't guarantee that an
# add_callback that completes without error will
# eventually execute).
pass
add_callback_from_signal = add_callback add_callback_from_signal = add_callback
def run_in_executor(self, executor, func, *args):
return self.asyncio_loop.run_in_executor(executor, func, *args)
def set_default_executor(self, executor):
return self.asyncio_loop.set_default_executor(executor)
class AsyncIOMainLoop(BaseAsyncIOLoop): class AsyncIOMainLoop(BaseAsyncIOLoop):
"""``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
current ``asyncio`` event loop (i.e. the one returned by current ``asyncio`` event loop (i.e. the one returned by
``asyncio.get_event_loop()``). Recommended usage:: ``asyncio.get_event_loop()``).
from tornado.platform.asyncio import AsyncIOMainLoop .. deprecated:: 5.0
import asyncio
AsyncIOMainLoop().install()
asyncio.get_event_loop().run_forever()
See also :meth:`tornado.ioloop.IOLoop.install` for general notes on Now used automatically when appropriate; it is no longer necessary
installing alternative IOLoops. to refer to this class directly.
.. versionchanged:: 5.0
Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop.
""" """
def initialize(self, **kwargs): def initialize(self, **kwargs):
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), **kwargs)
close_loop=False, **kwargs)
def make_current(self):
# AsyncIOMainLoop already refers to the current asyncio loop so
# nothing to do here.
pass
class AsyncIOLoop(BaseAsyncIOLoop): class AsyncIOLoop(BaseAsyncIOLoop):
"""``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop. """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
This class follows the usual Tornado semantics for creating new This class follows the usual Tornado semantics for creating new
``IOLoops``; these loops are not necessarily related to the ``IOLoops``; these loops are not necessarily related to the
``asyncio`` default event loop. Recommended usage:: ``asyncio`` default event loop.
from tornado.ioloop import IOLoop
IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
IOLoop.current().start()
Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
can be accessed with the ``asyncio_loop`` attribute. can be accessed with the ``asyncio_loop`` attribute.
.. versionchanged:: 5.0
When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets
the current `asyncio` event loop.
.. deprecated:: 5.0
Now used automatically when appropriate; it is no longer necessary
to refer to this class directly.
""" """
def initialize(self, **kwargs): def initialize(self, **kwargs):
self.is_current = False
loop = asyncio.new_event_loop() loop = asyncio.new_event_loop()
try: try:
super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs) super(AsyncIOLoop, self).initialize(loop, **kwargs)
except Exception: except Exception:
# If initialize() does not succeed (taking ownership of the loop), # If initialize() does not succeed (taking ownership of the loop),
# we have to close it. # we have to close it.
loop.close() loop.close()
raise raise
def close(self, all_fds=False):
if self.is_current:
self.clear_current()
super(AsyncIOLoop, self).close(all_fds=all_fds)
def make_current(self):
if not self.is_current:
try:
self.old_asyncio = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
self.old_asyncio = None
self.is_current = True
asyncio.set_event_loop(self.asyncio_loop)
def _clear_current_hook(self):
if self.is_current:
asyncio.set_event_loop(self.old_asyncio)
self.is_current = False
def to_tornado_future(asyncio_future): def to_tornado_future(asyncio_future):
"""Convert an `asyncio.Future` to a `tornado.concurrent.Future`. """Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
.. versionadded:: 4.1 .. versionadded:: 4.1
.. deprecated:: 5.0
Tornado ``Futures`` have been merged with `asyncio.Future`,
so this method is now a no-op.
""" """
tf = tornado.concurrent.Future() return asyncio_future
tornado.concurrent.chain_future(asyncio_future, tf)
return tf
def to_asyncio_future(tornado_future): def to_asyncio_future(tornado_future):
@ -211,12 +257,38 @@ def to_asyncio_future(tornado_future):
.. versionchanged:: 4.3 .. versionchanged:: 4.3
Now accepts any yieldable object, not just Now accepts any yieldable object, not just
`tornado.concurrent.Future`. `tornado.concurrent.Future`.
.. deprecated:: 5.0
Tornado ``Futures`` have been merged with `asyncio.Future`,
so this method is now equivalent to `tornado.gen.convert_yielded`.
""" """
tornado_future = convert_yielded(tornado_future) return convert_yielded(tornado_future)
af = asyncio.Future()
tornado.concurrent.chain_future(tornado_future, af)
return af
if hasattr(convert_yielded, 'register'): class AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore """Event loop policy that allows loop creation on any thread.
The default `asyncio` event loop policy only automatically creates
event loops in the main threads. Other threads must create event
loops explicitly or `asyncio.get_event_loop` (and therefore
`.IOLoop.current`) will fail. Installing this policy allows event
loops to be created automatically on any thread, matching the
behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
Usage::
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
.. versionadded:: 5.0
"""
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
# This was an AssertionError in python 3.4.2 (which ships with debian jessie)
# and changed to a RuntimeError in 3.4.3.
# "There is no current event loop in thread %r"
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
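A small sketch of the policy in action: once installed, ``IOLoop.current()`` succeeds on a freshly created thread instead of raising::

    import asyncio
    import threading

    from tornado.ioloop import IOLoop
    from tornado.platform.asyncio import AnyThreadEventLoopPolicy

    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

    def worker():
        # Without the policy this raises RuntimeError on Python 3.
        loop = IOLoop.current()
        loop.run_sync(lambda: None)

    t = threading.Thread(target=worker)
    t.start()
    t.join()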
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2011 Facebook # Copyright 2011 Facebook
# #
View file
@ -19,11 +19,11 @@ class CaresResolver(Resolver):
the default for ``tornado.simple_httpclient``, but other libraries the default for ``tornado.simple_httpclient``, but other libraries
may default to ``AF_UNSPEC``. may default to ``AF_UNSPEC``.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
def initialize(self, io_loop=None): def initialize(self):
self.io_loop = io_loop or IOLoop.current() self.io_loop = IOLoop.current()
self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb) self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
self.fds = {} self.fds = {}
View file
@ -32,10 +32,12 @@ class Waker(interface.Waker):
and Jython. and Jython.
""" """
def __init__(self): def __init__(self):
from .auto import set_close_exec
# Based on Zope select_trigger.py: # Based on Zope select_trigger.py:
# https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py
self.writer = socket.socket() self.writer = socket.socket()
set_close_exec(self.writer.fileno())
# Disable buffering -- pulling the trigger sends 1 byte, # Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP. # and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
@ -54,6 +56,7 @@ class Waker(interface.Waker):
# http://mail.zope.org/pipermail/zope/2005-July/160433.html # http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details. # for hideous details.
a = socket.socket() a = socket.socket()
set_close_exec(a.fileno())
a.bind(("127.0.0.1", 0)) a.bind(("127.0.0.1", 0))
a.listen(1) a.listen(1)
connect_address = a.getsockname() # assigned (host, port) pair connect_address = a.getsockname() # assigned (host, port) pair
@ -78,6 +81,7 @@ class Waker(interface.Waker):
a.close() a.close()
self.reader, addr = a.accept() self.reader, addr = a.accept()
set_close_exec(self.reader.fileno())
self.reader.setblocking(0) self.reader.setblocking(0)
self.writer.setblocking(0) self.writer.setblocking(0)
a.close() a.close()
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2012 Facebook # Copyright 2012 Facebook
# #
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2011 Facebook # Copyright 2011 Facebook
# #
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2012 Facebook # Copyright 2012 Facebook
# #
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2011 Facebook # Copyright 2011 Facebook
# #
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2012 Facebook # Copyright 2012 Facebook
# #
View file
@ -32,7 +32,7 @@ import sys
import twisted.internet.abstract # type: ignore import twisted.internet.abstract # type: ignore
from twisted.internet.defer import Deferred # type: ignore from twisted.internet.defer import Deferred # type: ignore
from twisted.internet.posixbase import PosixReactorBase # type: ignore from twisted.internet.posixbase import PosixReactorBase # type: ignore
from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore # noqa: E501
from twisted.python import failure, log # type: ignore from twisted.python import failure, log # type: ignore
from twisted.internet import error # type: ignore from twisted.internet import error # type: ignore
import twisted.names.cache # type: ignore import twisted.names.cache # type: ignore
@ -42,7 +42,7 @@ import twisted.names.resolve # type: ignore
from zope.interface import implementer # type: ignore from zope.interface import implementer # type: ignore
from tornado.concurrent import Future from tornado.concurrent import Future, future_set_exc_info
from tornado.escape import utf8 from tornado.escape import utf8
from tornado import gen from tornado import gen
import tornado.ioloop import tornado.ioloop
@ -112,7 +112,7 @@ class TornadoReactor(PosixReactorBase):
instead of ``reactor.run()``. instead of ``reactor.run()``.
It is also possible to create a non-global reactor by calling It is also possible to create a non-global reactor by calling
``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if ``tornado.platform.twisted.TornadoReactor()``. However, if
the `.IOLoop` and reactor are to be short-lived (such as those used in the `.IOLoop` and reactor are to be short-lived (such as those used in
unit tests), additional cleanup may be required. Specifically, it is unit tests), additional cleanup may be required. Specifically, it is
recommended to call:: recommended to call::
@ -122,13 +122,11 @@ class TornadoReactor(PosixReactorBase):
before closing the `.IOLoop`. before closing the `.IOLoop`.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
def __init__(self, io_loop=None): def __init__(self):
if not io_loop: self._io_loop = tornado.ioloop.IOLoop.current()
io_loop = tornado.ioloop.IOLoop.current()
self._io_loop = io_loop
self._readers = {} # map of reader objects to fd self._readers = {} # map of reader objects to fd
self._writers = {} # map of writer objects to fd self._writers = {} # map of writer objects to fd
self._fds = {} # a map of fd to a (reader, writer) tuple self._fds = {} # a map of fd to a (reader, writer) tuple
@ -319,7 +317,10 @@ class _TestReactor(TornadoReactor):
""" """
def __init__(self): def __init__(self):
# always use a new ioloop # always use a new ioloop
super(_TestReactor, self).__init__(IOLoop()) IOLoop.clear_current()
IOLoop(make_current=True)
super(_TestReactor, self).__init__()
IOLoop.clear_current()
def listenTCP(self, port, factory, backlog=50, interface=''): def listenTCP(self, port, factory, backlog=50, interface=''):
# default to localhost to avoid firewall prompts on the mac # default to localhost to avoid firewall prompts on the mac
@ -335,7 +336,7 @@ class _TestReactor(TornadoReactor):
port, protocol, interface=interface, maxPacketSize=maxPacketSize) port, protocol, interface=interface, maxPacketSize=maxPacketSize)
def install(io_loop=None): def install():
"""Install this package as the default Twisted reactor. """Install this package as the default Twisted reactor.
``install()`` must be called very early in the startup process, ``install()`` must be called very early in the startup process,
@ -346,13 +347,11 @@ def install(io_loop=None):
in multi-process mode, and an external process manager such as in multi-process mode, and an external process manager such as
``supervisord`` is recommended instead. ``supervisord`` is recommended instead.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
if not io_loop: reactor = TornadoReactor()
io_loop = tornado.ioloop.IOLoop.current()
reactor = TornadoReactor(io_loop)
from twisted.internet.main import installReactor # type: ignore from twisted.internet.main import installReactor # type: ignore
installReactor(reactor) installReactor(reactor)
return reactor return reactor
@ -384,6 +383,8 @@ class _FD(object):
self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR) self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
self.lost = True self.lost = True
writeConnectionLost = readConnectionLost = connectionLost
def logPrefix(self): def logPrefix(self):
return '' return ''
@ -519,21 +520,20 @@ class TwistedResolver(Resolver):
recommended only when threads cannot be used, since it has recommended only when threads cannot be used, since it has
limitations compared to the standard ``getaddrinfo``-based limitations compared to the standard ``getaddrinfo``-based
`~tornado.netutil.Resolver` and `~tornado.netutil.Resolver` and
`~tornado.netutil.ThreadedResolver`. Specifically, it returns at `~tornado.netutil.DefaultExecutorResolver`. Specifically, it returns at
most one result, and arguments other than ``host`` and ``family`` most one result, and arguments other than ``host`` and ``family``
are ignored. It may fail to resolve when ``family`` is not are ignored. It may fail to resolve when ``family`` is not
``socket.AF_UNSPEC``. ``socket.AF_UNSPEC``.
Requires Twisted 12.1 or newer. Requires Twisted 12.1 or newer.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
def initialize(self, io_loop=None): def initialize(self):
self.io_loop = io_loop or IOLoop.current()
# partial copy of twisted.names.client.createResolver, which doesn't # partial copy of twisted.names.client.createResolver, which doesn't
# allow for a reactor to be passed in. # allow for a reactor to be passed in.
self.reactor = tornado.platform.twisted.TornadoReactor(io_loop) self.reactor = tornado.platform.twisted.TornadoReactor()
host_resolver = twisted.names.hosts.Resolver('/etc/hosts') host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor) cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
@ -554,7 +554,9 @@ class TwistedResolver(Resolver):
resolved_family = socket.AF_INET6 resolved_family = socket.AF_INET6
else: else:
deferred = self.resolver.getHostByName(utf8(host)) deferred = self.resolver.getHostByName(utf8(host))
resolved = yield gen.Task(deferred.addBoth) fut = Future()
deferred.addBoth(fut.set_result)
resolved = yield fut
if isinstance(resolved, failure.Failure): if isinstance(resolved, failure.Failure):
try: try:
resolved.raiseException() resolved.raiseException()
@ -586,6 +588,6 @@ if hasattr(gen.convert_yielded, 'register'):
# Should never happen, but just in case # Should never happen, but just in case
raise Exception("errback called without error") raise Exception("errback called without error")
except: except:
f.set_exc_info(sys.exc_info()) future_set_exc_info(f, sys.exc_info())
d.addCallbacks(f.set_result, errback) d.addCallbacks(f.set_result, errback)
return f return f
View file
@ -8,7 +8,7 @@ import ctypes.wintypes # type: ignore
# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx # See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) # noqa: E501
SetHandleInformation.restype = ctypes.wintypes.BOOL SetHandleInformation.restype = ctypes.wintypes.BOOL
HANDLE_FLAG_INHERIT = 0x00000001 HANDLE_FLAG_INHERIT = 0x00000001
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2011 Facebook # Copyright 2011 Facebook
# #
@ -29,7 +28,7 @@ import time
from binascii import hexlify from binascii import hexlify
from tornado.concurrent import Future from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado import ioloop from tornado import ioloop
from tornado.iostream import PipeIOStream from tornado.iostream import PipeIOStream
from tornado.log import gen_log from tornado.log import gen_log
@ -126,10 +125,6 @@ def fork_processes(num_processes, max_restarts=100):
assert _task_id is None assert _task_id is None
if num_processes is None or num_processes <= 0: if num_processes is None or num_processes <= 0:
num_processes = cpu_count() num_processes = cpu_count()
if ioloop.IOLoop.initialized():
raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
"has already been initialized. You cannot call "
"IOLoop.instance() before calling start_processes()")
gen_log.info("Starting %d processes", num_processes) gen_log.info("Starting %d processes", num_processes)
children = {} children = {}
@ -199,16 +194,17 @@ class Subprocess(object):
* ``stdin``, ``stdout``, and ``stderr`` may have the value * ``stdin``, ``stdout``, and ``stderr`` may have the value
``tornado.process.Subprocess.STREAM``, which will make the corresponding ``tornado.process.Subprocess.STREAM``, which will make the corresponding
attribute of the resulting Subprocess a `.PipeIOStream`. attribute of the resulting Subprocess a `.PipeIOStream`. If this option
* A new keyword argument ``io_loop`` may be used to pass in an IOLoop. is used, the caller is responsible for closing the streams when done
with them.
The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
``wait_for_exit`` methods do not work on Windows. There is ``wait_for_exit`` methods do not work on Windows. There is
therefore no reason to use this class instead of therefore no reason to use this class instead of
``subprocess.Popen`` on that platform. ``subprocess.Popen`` on that platform.
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
STREAM = object() STREAM = object()
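A minimal sketch of the ``STREAM`` option (POSIX only, per the note above): reading the pipe to EOF consumes the output, then ``wait_for_exit`` reaps the process::

    from tornado import gen, ioloop
    from tornado.process import Subprocess

    @gen.coroutine
    def run():
        proc = Subprocess(["echo", "hello"], stdout=Subprocess.STREAM)
        out = yield proc.stdout.read_until_close()
        yield proc.wait_for_exit()
        print(out)  # b'hello\n'

    ioloop.IOLoop.current().run_sync(run)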
@ -217,7 +213,7 @@ class Subprocess(object):
_waiting = {} # type: ignore _waiting = {} # type: ignore
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current() self.io_loop = ioloop.IOLoop.current()
# All FDs we create should be closed on error; those in to_close # All FDs we create should be closed on error; those in to_close
# should be closed in the parent process on success. # should be closed in the parent process on success.
pipe_fds = [] pipe_fds = []
@ -227,19 +223,19 @@ class Subprocess(object):
kwargs['stdin'] = in_r kwargs['stdin'] = in_r
pipe_fds.extend((in_r, in_w)) pipe_fds.extend((in_r, in_w))
to_close.append(in_r) to_close.append(in_r)
self.stdin = PipeIOStream(in_w, io_loop=self.io_loop) self.stdin = PipeIOStream(in_w)
if kwargs.get('stdout') is Subprocess.STREAM: if kwargs.get('stdout') is Subprocess.STREAM:
out_r, out_w = _pipe_cloexec() out_r, out_w = _pipe_cloexec()
kwargs['stdout'] = out_w kwargs['stdout'] = out_w
pipe_fds.extend((out_r, out_w)) pipe_fds.extend((out_r, out_w))
to_close.append(out_w) to_close.append(out_w)
self.stdout = PipeIOStream(out_r, io_loop=self.io_loop) self.stdout = PipeIOStream(out_r)
if kwargs.get('stderr') is Subprocess.STREAM: if kwargs.get('stderr') is Subprocess.STREAM:
err_r, err_w = _pipe_cloexec() err_r, err_w = _pipe_cloexec()
kwargs['stderr'] = err_w kwargs['stderr'] = err_w
pipe_fds.extend((err_r, err_w)) pipe_fds.extend((err_r, err_w))
to_close.append(err_w) to_close.append(err_w)
self.stderr = PipeIOStream(err_r, io_loop=self.io_loop) self.stderr = PipeIOStream(err_r)
try: try:
self.proc = subprocess.Popen(*args, **kwargs) self.proc = subprocess.Popen(*args, **kwargs)
except: except:
@ -270,7 +266,7 @@ class Subprocess(object):
signal handler is causing a problem. signal handler is causing a problem.
""" """
self._exit_callback = stack_context.wrap(callback) self._exit_callback = stack_context.wrap(callback)
Subprocess.initialize(self.io_loop) Subprocess.initialize()
Subprocess._waiting[self.pid] = self Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid) Subprocess._try_cleanup_process(self.pid)
@ -297,12 +293,12 @@ class Subprocess(object):
# Unfortunately we don't have the original args any more. # Unfortunately we don't have the original args any more.
future.set_exception(CalledProcessError(ret, None)) future.set_exception(CalledProcessError(ret, None))
else: else:
future.set_result(ret) future_set_result_unless_cancelled(future, ret)
self.set_exit_callback(callback) self.set_exit_callback(callback)
return future return future
@classmethod @classmethod
def initialize(cls, io_loop=None): def initialize(cls):
"""Initializes the ``SIGCHLD`` handler. """Initializes the ``SIGCHLD`` handler.
The signal handler is run on an `.IOLoop` to avoid locking issues. The signal handler is run on an `.IOLoop` to avoid locking issues.
@ -310,13 +306,13 @@ class Subprocess(object):
same one used by individual Subprocess objects (as long as the same one used by individual Subprocess objects (as long as the
``IOLoops`` are each running in separate threads). ``IOLoops`` are each running in separate threads).
.. versionchanged:: 4.1 .. versionchanged:: 5.0
The ``io_loop`` argument is deprecated. The ``io_loop`` argument (deprecated since version 4.1) has been
removed.
""" """
if cls._initialized: if cls._initialized:
return return
if io_loop is None: io_loop = ioloop.IOLoop.current()
io_loop = ioloop.IOLoop.current()
cls._old_sigchld = signal.signal( cls._old_sigchld = signal.signal(
signal.SIGCHLD, signal.SIGCHLD,
lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup)) lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
View file
@ -12,7 +12,9 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
"""Asynchronous queues for coroutines. """Asynchronous queues for coroutines. These classes are very similar
to those provided in the standard library's `asyncio package
<https://docs.python.org/3/library/asyncio-queue.html>`_.
.. warning:: .. warning::
@ -20,6 +22,7 @@
are *not* thread-safe. To use these queues from another thread, are *not* thread-safe. To use these queues from another thread,
use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread
before calling any queue methods. before calling any queue methods.
""" """
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
@ -28,7 +31,7 @@ import collections
import heapq import heapq
from tornado import gen, ioloop from tornado import gen, ioloop
from tornado.concurrent import Future from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.locks import Event from tornado.locks import Event
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty'] __all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
@ -47,7 +50,8 @@ class QueueFull(Exception):
def _set_timeout(future, timeout): def _set_timeout(future, timeout):
if timeout: if timeout:
def on_timeout(): def on_timeout():
future.set_exception(gen.TimeoutError()) if not future.done():
future.set_exception(gen.TimeoutError())
io_loop = ioloop.IOLoop.current() io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout) timeout_handle = io_loop.add_timeout(timeout, on_timeout)
future.add_done_callback( future.add_done_callback(
@ -166,18 +170,23 @@ class Queue(object):
def put(self, item, timeout=None): def put(self, item, timeout=None):
"""Put an item into the queue, perhaps waiting until there is room. """Put an item into the queue, perhaps waiting until there is room.
Returns a Future, which raises `tornado.gen.TimeoutError` after a Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout. timeout.
``timeout`` may be a number denoting a time (on the same
scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
""" """
future = Future()
try: try:
self.put_nowait(item) self.put_nowait(item)
except QueueFull: except QueueFull:
future = Future()
self._putters.append((item, future)) self._putters.append((item, future))
_set_timeout(future, timeout) _set_timeout(future, timeout)
return future
else: else:
return gen._null_future future.set_result(None)
return future
def put_nowait(self, item): def put_nowait(self, item):
"""Put an item into the queue without blocking. """Put an item into the queue without blocking.
@ -189,7 +198,7 @@ class Queue(object):
assert self.empty(), "queue non-empty, why are getters waiting?" assert self.empty(), "queue non-empty, why are getters waiting?"
getter = self._getters.popleft() getter = self._getters.popleft()
self.__put_internal(item) self.__put_internal(item)
getter.set_result(self._get()) future_set_result_unless_cancelled(getter, self._get())
elif self.full(): elif self.full():
raise QueueFull raise QueueFull
else: else:
@ -199,7 +208,12 @@ class Queue(object):
"""Remove and return an item from the queue. """Remove and return an item from the queue.
Returns a Future which resolves once an item is available, or raises Returns a Future which resolves once an item is available, or raises
`tornado.gen.TimeoutError` after a timeout. `tornado.util.TimeoutError` after a timeout.
``timeout`` may be a number denoting a time (on the same
scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
""" """
future = Future() future = Future()
try: try:
@ -220,7 +234,7 @@ class Queue(object):
assert self.full(), "queue not full, why are putters waiting?" assert self.full(), "queue not full, why are putters waiting?"
item, putter = self._putters.popleft() item, putter = self._putters.popleft()
self.__put_internal(item) self.__put_internal(item)
putter.set_result(None) future_set_result_unless_cancelled(putter, None)
return self._get() return self._get()
elif self.qsize(): elif self.qsize():
return self._get() return self._get()
@ -248,12 +262,11 @@ class Queue(object):
def join(self, timeout=None): def join(self, timeout=None):
"""Block until all items in the queue are processed. """Block until all items in the queue are processed.
Returns a Future, which raises `tornado.gen.TimeoutError` after a Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout. timeout.
""" """
return self._finished.wait(timeout) return self._finished.wait(timeout)
@gen.coroutine
def __aiter__(self): def __aiter__(self):
return _QueueIterator(self) return _QueueIterator(self)

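The new timeout semantics can be exercised like this; a sketch relying on the alias of `tornado.gen.TimeoutError` to `tornado.util.TimeoutError` introduced elsewhere in this commit:

    from datetime import timedelta
    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.queues import Queue
    from tornado.util import TimeoutError

    @gen.coroutine
    def consume():
        q = Queue()
        try:
            # A timedelta is a deadline relative to now, per the new docs.
            item = yield q.get(timeout=timedelta(seconds=0.1))
            print(item)
        except TimeoutError:
            print("queue stayed empty")

    IOLoop.current().run_sync(consume)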
tornado/routing.py

@@ -242,6 +242,11 @@ class _RoutingDelegate(httputil.HTTPMessageDelegate):
            start_line=start_line, headers=headers)

        self.delegate = self.router.find_handler(request)
+        if self.delegate is None:
+            app_log.debug("Delegate for %s %s request not found",
+                          start_line.method, start_line.path)
+            self.delegate = _DefaultMessageDelegate(self.request_conn)
+
        return self.delegate.headers_received(start_line, headers)

    def data_received(self, chunk):
@@ -254,6 +259,16 @@ class _RoutingDelegate(httputil.HTTPMessageDelegate):
        self.delegate.on_connection_close()


+class _DefaultMessageDelegate(httputil.HTTPMessageDelegate):
+    def __init__(self, connection):
+        self.connection = connection
+
+    def finish(self):
+        self.connection.write_headers(
+            httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"), httputil.HTTPHeaders())
+        self.connection.finish()
+
+
class RuleRouter(Router):
    """Rule-based router implementation."""
@@ -278,7 +293,8 @@ class RuleRouter(Router):
        ])

    In the examples above, ``Target`` can be a nested `Router` instance, an instance of
-    `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument.
+    `~.httputil.HTTPServerConnectionDelegate` or an old-style callable,
+    accepting a request argument.

    :arg rules: a list of `Rule` instances or tuples of `Rule`
        constructor arguments.
@@ -567,7 +583,7 @@ class PathMatches(Matcher):
            else:
                try:
                    unescaped_fragment = re_unescape(fragment)
-                except ValueError as exc:
+                except ValueError:
                    # If we can't unescape part of it, we can't
                    # reverse this url.
                    return (None, None)
@@ -589,7 +605,7 @@ class URLSpec(Rule):
    * ``pattern``: Regular expression to be matched. Any capturing
      groups in the regex will be passed in to the handler's
      get/post/etc methods as arguments (by keyword if named, by
-      position if unnamed. Named and unnamed capturing groups may
+      position if unnamed. Named and unnamed capturing groups
      may not be mixed in the same rule).

    * ``handler``: `~.web.RequestHandler` subclass to be invoked.
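With the default delegate in place, a router that matches nothing now produces a 404 rather than a hung connection. A quick sketch (the handler and pattern are illustrative):

    from tornado.httpserver import HTTPServer
    from tornado.routing import PathMatches, Rule, RuleRouter
    from tornado.web import Application, RequestHandler

    class ApiHandler(RequestHandler):
        def get(self):
            self.write("api")

    router = RuleRouter([
        Rule(PathMatches("/api.*"), Application([(r"/api", ApiHandler)])),
    ])
    # Requests matching no rule are now answered by the new
    # _DefaultMessageDelegate with "404 Not Found".
    server = HTTPServer(router)
    server.listen(8888)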
tornado/simple_httpclient.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
from __future__ import absolute_import, division, print_function

from tornado.escape import utf8, _unicode
@@ -6,6 +5,7 @@ from tornado import gen
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado import httputil
from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
+from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults
from tornado.log import gen_log
@@ -34,18 +34,6 @@ except ImportError:
    # ssl is not available on Google App Engine.
    ssl = None

-try:
-    import certifi
-except ImportError:
-    certifi = None
-
-
-def _default_ca_certs():
-    if certifi is None:
-        raise Exception("The 'certifi' package is required to use https "
-                        "in simple_httpclient")
-    return certifi.where()
-

class SimpleAsyncHTTPClient(AsyncHTTPClient):
    """Non-blocking HTTP client with no external dependencies.
@@ -56,7 +44,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
    are not reused, and callers cannot select the network interface to be
    used.
    """
-    def initialize(self, io_loop, max_clients=10,
+    def initialize(self, max_clients=10,
                   hostname_mapping=None, max_buffer_size=104857600,
                   resolver=None, defaults=None, max_header_size=None,
                   max_body_size=None):
@@ -92,8 +80,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
        .. versionchanged:: 4.2
           Added the ``max_body_size`` argument.
        """
-        super(SimpleAsyncHTTPClient, self).initialize(io_loop,
-                                                      defaults=defaults)
+        super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults)
        self.max_clients = max_clients
        self.queue = collections.deque()
        self.active = {}
@@ -107,12 +94,12 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
            self.resolver = resolver
            self.own_resolver = False
        else:
-            self.resolver = Resolver(io_loop=io_loop)
+            self.resolver = Resolver()
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
-        self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
+        self.tcp_client = TCPClient(resolver=self.resolver)

    def close(self):
        super(SimpleAsyncHTTPClient, self).close()
@@ -153,7 +140,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
    def _handle_request(self, request, release_callback, final_callback):
        self._connection_class()(
-            self.io_loop, self, request, release_callback,
+            self, request, release_callback,
            final_callback, self.max_buffer_size, self.tcp_client,
            self.max_header_size, self.max_body_size)
@@ -190,11 +177,11 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
class _HTTPConnection(httputil.HTTPMessageDelegate):

    _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])

-    def __init__(self, io_loop, client, request, release_callback,
+    def __init__(self, client, request, release_callback,
                 final_callback, max_buffer_size, tcp_client,
                 max_header_size, max_body_size):
-        self.start_time = io_loop.time()
-        self.io_loop = io_loop
+        self.io_loop = IOLoop.current()
+        self.start_time = self.io_loop.time()
        self.client = client
        self.request = request
        self.release_callback = release_callback
@@ -240,10 +227,10 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
            self._timeout = self.io_loop.add_timeout(
                self.start_time + timeout,
                stack_context.wrap(functools.partial(self._on_timeout, "while connecting")))
-            self.tcp_client.connect(host, port, af=af,
-                                    ssl_options=ssl_options,
-                                    max_buffer_size=self.max_buffer_size,
-                                    callback=self._on_connect)
+            fut = self.tcp_client.connect(host, port, af=af,
+                                          ssl_options=ssl_options,
+                                          max_buffer_size=self.max_buffer_size)
+            fut.add_done_callback(stack_context.wrap(self._on_connect))

    def _get_ssl_options(self, scheme):
        if scheme == "https":
@@ -256,42 +243,19 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
                    self.request.client_cert is None and
                    self.request.client_key is None):
                return _client_ssl_defaults
-            ssl_options = {}
-            if self.request.validate_cert:
-                ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
-            if self.request.ca_certs is not None:
-                ssl_options["ca_certs"] = self.request.ca_certs
-            elif not hasattr(ssl, 'create_default_context'):
-                # When create_default_context is present,
-                # we can omit the "ca_certs" parameter entirely,
-                # which avoids the dependency on "certifi" for py34.
-                ssl_options["ca_certs"] = _default_ca_certs()
-            if self.request.client_key is not None:
-                ssl_options["keyfile"] = self.request.client_key
-            if self.request.client_cert is not None:
-                ssl_options["certfile"] = self.request.client_cert
-
-            # SSL interoperability is tricky.  We want to disable
-            # SSLv2 for security reasons; it wasn't disabled by default
-            # until openssl 1.0.  The best way to do this is to use
-            # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
-            # until 3.2.  Python 2.7 adds the ciphers argument, which
-            # can also be used to disable SSLv2.  As a last resort
-            # on python 2.6, we set ssl_version to TLSv1.  This is
-            # more narrow than we'd like since it also breaks
-            # compatibility with servers configured for SSLv3 only,
-            # but nearly all servers support both SSLv3 and TLSv1:
-            # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
-            if sys.version_info >= (2, 7):
-                # In addition to disabling SSLv2, we also exclude certain
-                # classes of insecure ciphers.
-                ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES"
-            else:
-                # This is really only necessary for pre-1.0 versions
-                # of openssl, but python 2.6 doesn't expose version
-                # information.
-                ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1
-            return ssl_options
+            ssl_ctx = ssl.create_default_context(
+                ssl.Purpose.SERVER_AUTH,
+                cafile=self.request.ca_certs)
+            if not self.request.validate_cert:
+                ssl_ctx.check_hostname = False
+                ssl_ctx.verify_mode = ssl.CERT_NONE
+            if self.request.client_cert is not None:
+                ssl_ctx.load_cert_chain(self.request.client_cert,
+                                        self.request.client_key)
+            if hasattr(ssl, 'OP_NO_COMPRESSION'):
+                # See netutil.ssl_options_to_context
+                ssl_ctx.options |= ssl.OP_NO_COMPRESSION
+            return ssl_ctx
        return None

    def _on_timeout(self, info=None):
@@ -311,7 +275,8 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

-    def _on_connect(self, stream):
+    def _on_connect(self, stream_fut):
+        stream = stream_fut.result()
        if self.final_callback is None:
            # final_callback is cleared if we've hit our timeout.
            stream.close()
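For reference, the context built by the rewritten _get_ssl_options is roughly equivalent to this standalone snippet for the validate_cert=False case (a sketch, not a public API):

    import ssl

    ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
    # What validate_cert=False now maps to:
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE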
tornado/speedups.c

@@ -1,9 +1,12 @@
#define PY_SSIZE_T_CLEAN
#include <Python.h>
+#include <stdint.h>

static PyObject* websocket_mask(PyObject* self, PyObject* args) {
    const char* mask;
    Py_ssize_t mask_len;
+    uint32_t uint32_mask;
+    uint64_t uint64_mask;
    const char* data;
    Py_ssize_t data_len;
    Py_ssize_t i;
@@ -14,13 +17,35 @@ static PyObject* websocket_mask(PyObject* self, PyObject* args) {
        return NULL;
    }

+    uint32_mask = ((uint32_t*)mask)[0];
+
    result = PyBytes_FromStringAndSize(NULL, data_len);
    if (!result) {
        return NULL;
    }
    buf = PyBytes_AsString(result);
+
+    if (sizeof(size_t) >= 8) {
+        uint64_mask = uint32_mask;
+        uint64_mask = (uint64_mask << 32) | uint32_mask;
+
+        while (data_len >= 8) {
+            ((uint64_t*)buf)[0] = ((uint64_t*)data)[0] ^ uint64_mask;
+            data += 8;
+            buf += 8;
+            data_len -= 8;
+        }
+    }
+
+    while (data_len >= 4) {
+        ((uint32_t*)buf)[0] = ((uint32_t*)data)[0] ^ uint32_mask;
+        data += 4;
+        buf += 4;
+        data_len -= 4;
+    }
+
    for (i = 0; i < data_len; i++) {
-        buf[i] = data[i] ^ mask[i % 4];
+        buf[i] = data[i] ^ mask[i];
    }

    return result;
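The unrolled 64-bit and 32-bit XOR passes are purely a speed optimization; byte for byte the function still computes the same thing as a pure-Python fallback, roughly:

    def websocket_mask(mask, data):
        # Python 3 semantics: iterating over bytes yields ints.
        return bytes(b ^ mask[i % 4] for i, b in enumerate(data))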
tornado/stack_context.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
tornado/tcpclient.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright 2014 Facebook
#
@@ -20,12 +19,17 @@ from __future__ import absolute_import, division, print_function

import functools
import socket
+import numbers
+import datetime

-from tornado.concurrent import Future
+from tornado.concurrent import Future, future_add_done_callback
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado import gen
from tornado.netutil import Resolver
+from tornado.platform.auto import set_close_exec
+from tornado.gen import TimeoutError
+from tornado.util import timedelta_to_seconds

_INITIAL_CONNECT_TIMEOUT = 0.3
@@ -47,15 +51,17 @@ class _Connector(object):

    http://tools.ietf.org/html/rfc6555
    """
-    def __init__(self, addrinfo, io_loop, connect):
-        self.io_loop = io_loop
+    def __init__(self, addrinfo, connect):
+        self.io_loop = IOLoop.current()
        self.connect = connect

        self.future = Future()
        self.timeout = None
+        self.connect_timeout = None
        self.last_error = None
        self.remaining = len(addrinfo)
        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
+        self.streams = set()

    @staticmethod
    def split(addrinfo):
@@ -77,9 +83,11 @@ class _Connector(object):
                secondary.append((af, addr))
        return primary, secondary

-    def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
+    def start(self, timeout=_INITIAL_CONNECT_TIMEOUT, connect_timeout=None):
        self.try_connect(iter(self.primary_addrs))
-        self.set_timout(timeout)
+        self.set_timeout(timeout)
+        if connect_timeout is not None:
+            self.set_connect_timeout(connect_timeout)
        return self.future

    def try_connect(self, addrs):
@@ -93,9 +101,10 @@ class _Connector(object):
            self.future.set_exception(self.last_error or
                                      IOError("connection failed"))
            return
-        future = self.connect(af, addr)
-        future.add_done_callback(functools.partial(self.on_connect_done,
-                                                   addrs, af, addr))
+        stream, future = self.connect(af, addr)
+        self.streams.add(stream)
+        future_add_done_callback(
+            future, functools.partial(self.on_connect_done, addrs, af, addr))

    def on_connect_done(self, addrs, af, addr, future):
        self.remaining -= 1
@@ -114,39 +123,60 @@ class _Connector(object):
            self.io_loop.remove_timeout(self.timeout)
            self.on_timeout()
            return
-        self.clear_timeout()
+        self.clear_timeouts()
        if self.future.done():
            # This is a late arrival; just drop it.
            stream.close()
        else:
+            self.streams.discard(stream)
            self.future.set_result((af, addr, stream))
+            self.close_streams()

-    def set_timout(self, timeout):
+    def set_timeout(self, timeout):
        self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
                                                self.on_timeout)

    def on_timeout(self):
        self.timeout = None
-        self.try_connect(iter(self.secondary_addrs))
+        if not self.future.done():
+            self.try_connect(iter(self.secondary_addrs))

    def clear_timeout(self):
        if self.timeout is not None:
            self.io_loop.remove_timeout(self.timeout)

+    def set_connect_timeout(self, connect_timeout):
+        self.connect_timeout = self.io_loop.add_timeout(
+            connect_timeout, self.on_connect_timeout)
+
+    def on_connect_timeout(self):
+        if not self.future.done():
+            self.future.set_exception(TimeoutError())
+        self.close_streams()
+
+    def clear_timeouts(self):
+        if self.timeout is not None:
+            self.io_loop.remove_timeout(self.timeout)
+        if self.connect_timeout is not None:
+            self.io_loop.remove_timeout(self.connect_timeout)
+
+    def close_streams(self):
+        for stream in self.streams:
+            stream.close()
+

class TCPClient(object):
    """A non-blocking TCP connection factory.

-    .. versionchanged:: 4.1
-       The ``io_loop`` argument is deprecated.
+    .. versionchanged:: 5.0
+       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    """
-    def __init__(self, resolver=None, io_loop=None):
-        self.io_loop = io_loop or IOLoop.current()
+    def __init__(self, resolver=None):
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
-            self.resolver = Resolver(io_loop=io_loop)
+            self.resolver = Resolver()
            self._own_resolver = True

    def close(self):
@@ -155,7 +185,8 @@ class TCPClient(object):

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
-                max_buffer_size=None, source_ip=None, source_port=None):
+                max_buffer_size=None, source_ip=None, source_port=None,
+                timeout=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
@@ -167,25 +198,48 @@ class TCPClient(object):
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

+        Raises `TimeoutError` if the input future does not complete before
+        ``timeout``, which may be specified in any form allowed by
+        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
+        relative to `.IOLoop.time`)
+
        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.
+
+        .. versionchanged:: 5.0
+           Added the ``timeout`` argument.
        """
-        addrinfo = yield self.resolver.resolve(host, port, af)
+        if timeout is not None:
+            if isinstance(timeout, numbers.Real):
+                timeout = IOLoop.current().time() + timeout
+            elif isinstance(timeout, datetime.timedelta):
+                timeout = IOLoop.current().time() + timedelta_to_seconds(timeout)
+            else:
+                raise TypeError("Unsupported timeout %r" % timeout)
+        if timeout is not None:
+            addrinfo = yield gen.with_timeout(
+                timeout, self.resolver.resolve(host, port, af))
+        else:
+            addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
-            addrinfo, self.io_loop,
+            addrinfo,
            functools.partial(self._create_stream, max_buffer_size,
                              source_ip=source_ip, source_port=source_port)
        )
-        af, addr, stream = yield connector.start()
+        af, addr, stream = yield connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
-            stream = yield stream.start_tls(False, ssl_options=ssl_options,
-                                            server_hostname=host)
+            if timeout is not None:
+                stream = yield gen.with_timeout(timeout, stream.start_tls(
+                    False, ssl_options=ssl_options, server_hostname=host))
+            else:
+                stream = yield stream.start_tls(False, ssl_options=ssl_options,
+                                                server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
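A sketch of the new timeout argument in use (the host and deadline here are illustrative):

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.tcpclient import TCPClient
    from tornado.util import TimeoutError

    @gen.coroutine
    def first_line(host):
        try:
            # One deadline covers resolution, connect and TLS (new in 5.0).
            stream = yield TCPClient().connect(host, 80, timeout=5)
        except TimeoutError:
            raise gen.Return(b"timed out")
        yield stream.write(b"HEAD / HTTP/1.0\r\nHost: " + host.encode() + b"\r\n\r\n")
        line = yield stream.read_until(b"\r\n")
        stream.close()
        raise gen.Return(line)

    print(IOLoop.current().run_sync(lambda: first_line("example.com")))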
@@ -202,6 +256,7 @@ class TCPClient(object):
        #   - 127.0.0.1 for IPv4
        #   - ::1 for IPv6
        socket_obj = socket.socket(af)
+        set_close_exec(socket_obj.fileno())
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            try:
@@ -212,11 +267,10 @@ class TCPClient(object):
                raise
        try:
            stream = IOStream(socket_obj,
-                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
-            return stream.connect(addr)
+            return stream, stream.connect(addr)
tornado/tcpserver.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
@@ -102,12 +101,15 @@ class TCPServer(object):
    .. versionadded:: 3.1
       The ``max_buffer_size`` argument.
+
+    .. versionchanged:: 5.0
+       The ``io_loop`` argument has been removed.
    """
-    def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None,
+    def __init__(self, ssl_options=None, max_buffer_size=None,
                 read_chunk_size=None):
-        self.io_loop = io_loop
        self.ssl_options = ssl_options
        self._sockets = {}  # fd -> socket object
+        self._handlers = {}  # fd -> remove_handler callable
        self._pending_sockets = []
        self._started = False
        self._stopped = False
@@ -151,13 +153,10 @@ class TCPServer(object):
        method and `tornado.process.fork_processes` to provide greater
        control over the initialization of a multi-process server.
        """
-        if self.io_loop is None:
-            self.io_loop = IOLoop.current()
-
        for sock in sockets:
            self._sockets[sock.fileno()] = sock
-            add_accept_handler(sock, self._handle_connection,
-                               io_loop=self.io_loop)
+            self._handlers[sock.fileno()] = add_accept_handler(
+                sock, self._handle_connection)

    def add_socket(self, socket):
        """Singular version of `add_sockets`.  Takes a single socket object."""
@@ -234,7 +233,8 @@ class TCPServer(object):
        self._stopped = True
        for fd, sock in self._sockets.items():
            assert sock.fileno() == fd
-            self.io_loop.remove_handler(fd)
+            # Unregister socket from IOLoop
+            self._handlers.pop(fd)()
            sock.close()

    def handle_stream(self, stream, address):
@@ -284,17 +284,17 @@ class TCPServer(object):
                    raise
            try:
                if self.ssl_options is not None:
-                    stream = SSLIOStream(connection, io_loop=self.io_loop,
+                    stream = SSLIOStream(connection,
                                         max_buffer_size=self.max_buffer_size,
                                         read_chunk_size=self.read_chunk_size)
                else:
-                    stream = IOStream(connection, io_loop=self.io_loop,
+                    stream = IOStream(connection,
                                      max_buffer_size=self.max_buffer_size,
                                      read_chunk_size=self.read_chunk_size)
                future = self.handle_stream(stream, address)
                if future is not None:
-                    self.io_loop.add_future(gen.convert_yielded(future),
-                                            lambda f: f.result())
+                    IOLoop.current().add_future(gen.convert_yielded(future),
+                                                lambda f: f.result())
            except Exception:
                app_log.error("Error in connection callback", exc_info=True)
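The io_loop-free constructor in use, as a minimal echo server (a sketch; the port and class name are illustrative):

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.iostream import StreamClosedError
    from tornado.tcpserver import TCPServer

    class EchoServer(TCPServer):
        @gen.coroutine
        def handle_stream(self, stream, address):
            try:
                while True:
                    line = yield stream.read_until(b"\n")
                    yield stream.write(line)
            except StreamClosedError:
                pass

    server = EchoServer()  # no io_loop argument in 5.0
    server.listen(8888)
    IOLoop.current().start()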
tornado/template.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@@ -260,9 +259,8 @@ class Template(object):
        :arg str template_string: the contents of the template file.
        :arg str name: the filename from which the template was loaded
            (used for error message).
-        :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template,
-            used to resolve ``{% include %}`` and ``{% extend %}``
-            directives.
+        :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible
+            for this template, used to resolve ``{% include %}`` and ``{% extend %}`` directives.
        :arg bool compress_whitespace: Deprecated since Tornado 4.3.
            Equivalent to ``whitespace="single"`` if true and
            ``whitespace="all"`` if false.
tornado/testing.py

@@ -1,10 +1,9 @@
-#!/usr/bin/env python
"""Support classes for automated testing.

* `AsyncTestCase` and `AsyncHTTPTestCase`:  Subclasses of unittest.TestCase
  with additional support for testing asynchronous (`.IOLoop`-based) code.

-* `ExpectLog` and `LogTrapTestCase`: Make test logs less spammy.
+* `ExpectLog`: Make test logs less spammy.

* `main()`: A simple test runner (wrapper around unittest.main()) with support
  for the tornado.autoreload module to rerun the tests when code changes.
@@ -22,7 +21,7 @@ try:
    from tornado.process import Subprocess
except ImportError:
    # These modules are not importable on app engine.  Parts of this module
-    # won't work, but e.g. LogTrapTestCase and main() will.
+    # won't work, but e.g. main() will.
    AsyncHTTPClient = None  # type: ignore
    gen = None  # type: ignore
    HTTPServer = None  # type: ignore
@@ -30,7 +29,7 @@ except ImportError:
    netutil = None  # type: ignore
    SimpleAsyncHTTPClient = None  # type: ignore
    Subprocess = None  # type: ignore
-from tornado.log import gen_log, app_log
+from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import raise_exc_info, basestring_type, PY3
import functools
@@ -42,10 +41,11 @@ import signal
import socket
import sys

-if PY3:
-    from io import StringIO
-else:
-    from cStringIO import StringIO
+try:
+    import asyncio
+except ImportError:
+    asyncio = None
+

try:
    from collections.abc import Generator as GeneratorType  # type: ignore
@@ -73,24 +73,12 @@ else:
    except ImportError:
        import unittest  # type: ignore

-_next_port = 10000
-
-
-def get_unused_port():
-    """Returns a (hopefully) unused port number.
-
-    This function does not guarantee that the port it returns is available,
-    only that a series of get_unused_port calls in a single process return
-    distinct ports.
-
-    .. deprecated::
-       Use bind_unused_port instead, which is guaranteed to find an unused port.
-    """
-    global _next_port
-    port = _next_port
-    _next_port = _next_port + 1
-    return port
+if asyncio is None:
+    _NON_OWNED_IOLOOPS = ()
+else:
+    import tornado.platform.asyncio
+    _NON_OWNED_IOLOOPS = tornado.platform.asyncio.AsyncIOMainLoop


def bind_unused_port(reuse_port=False):
    """Binds a server socket to an available port on localhost.
@@ -166,8 +154,7 @@ class AsyncTestCase(unittest.TestCase):
    callbacks should call ``self.stop()`` to signal completion.

    By default, a new `.IOLoop` is constructed for each test and is available
-    as ``self.io_loop``.  This `.IOLoop` should be used in the construction of
-    HTTP clients/servers, etc.  If the code being tested requires a
+    as ``self.io_loop``.  If the code being tested requires a
    global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.

    The `.IOLoop`'s ``start`` and ``stop`` methods should not be
@@ -182,7 +169,7 @@ class AsyncTestCase(unittest.TestCase):
        class MyTestCase(AsyncTestCase):
            @tornado.testing.gen_test
            def test_http_fetch(self):
-                client = AsyncHTTPClient(self.io_loop)
+                client = AsyncHTTPClient()
                response = yield client.fetch("http://www.tornadoweb.org")
                # Test contents of response
                self.assertIn("FriendFeed", response.body)
@@ -190,7 +177,7 @@ class AsyncTestCase(unittest.TestCase):
        # This test uses argument passing between self.stop and self.wait.
        class MyTestCase2(AsyncTestCase):
            def test_http_fetch(self):
-                client = AsyncHTTPClient(self.io_loop)
+                client = AsyncHTTPClient()
                client.fetch("http://www.tornadoweb.org/", self.stop)
                response = self.wait()
                # Test contents of response
@@ -199,7 +186,7 @@ class AsyncTestCase(unittest.TestCase):
        # This test uses an explicit callback-based style.
        class MyTestCase3(AsyncTestCase):
            def test_http_fetch(self):
-                client = AsyncHTTPClient(self.io_loop)
+                client = AsyncHTTPClient()
                client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
                self.wait()
@@ -235,8 +222,7 @@ class AsyncTestCase(unittest.TestCase):
            # Clean up Subprocess, so it can be used again with a new ioloop.
            Subprocess.uninitialize()
        self.io_loop.clear_current()
-        if (not IOLoop.initialized() or
-                self.io_loop is not IOLoop.instance()):
+        if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS):
            # Try to clean up any file descriptors left open in the ioloop.
            # This avoids leaks, especially when tests are run repeatedly
            # in the same process with autoreload (because curl does not
@@ -250,9 +236,15 @@ class AsyncTestCase(unittest.TestCase):
        self.__rethrow()

    def get_new_ioloop(self):
-        """Creates a new `.IOLoop` for this test.  May be overridden in
-        subclasses for tests that require a specific `.IOLoop` (usually
-        the singleton `.IOLoop.instance()`).
+        """Returns the `.IOLoop` to use for this test.
+
+        By default, a new `.IOLoop` is created for each test.
+        Subclasses may override this method to return
+        `.IOLoop.current()` if it is not appropriate to use a new
+        `.IOLoop` in each tests (for example, if there are global
+        singletons using the default `.IOLoop`) or if a per-test event
+        loop is being provided by another system (such as
+        ``pytest-asyncio``).
        """
        return IOLoop()
@@ -321,7 +313,8 @@ class AsyncTestCase(unittest.TestCase):
            except Exception:
                self.__failure = sys.exc_info()
                self.stop()
-            self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func)
+            self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
+                                                      timeout_func)
        while True:
            self.__running = True
            self.io_loop.start()
@@ -382,11 +375,10 @@ class AsyncHTTPTestCase(AsyncTestCase):
        self.http_server.add_sockets([sock])

    def get_http_client(self):
-        return AsyncHTTPClient(io_loop=self.io_loop)
+        return AsyncHTTPClient()

    def get_http_server(self):
-        return HTTPServer(self._app, io_loop=self.io_loop,
-                          **self.get_httpserver_options())
+        return HTTPServer(self._app, **self.get_httpserver_options())

    def get_app(self):
        """Should be overridden by subclasses to return a
@@ -395,14 +387,23 @@ class AsyncHTTPTestCase(AsyncTestCase):
        raise NotImplementedError()

    def fetch(self, path, **kwargs):
-        """Convenience method to synchronously fetch a url.
+        """Convenience method to synchronously fetch a URL.

        The given path will be appended to the local server's host and
        port.  Any additional kwargs will be passed directly to
        `.AsyncHTTPClient.fetch` (and so could be used to pass
        ``method="POST"``, ``body="..."``, etc).

+        If the path begins with http:// or https://, it will be treated as a
+        full URL and will be fetched as-is.
+
+        .. versionchanged:: 5.0
+           Added support for absolute URLs.
        """
-        self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
+        if path.lower().startswith(('http://', 'https://')):
+            self.http_client.fetch(path, self.stop, **kwargs)
+        else:
+            self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
        return self.wait()

    def get_httpserver_options(self):
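The relative/absolute split in fetch() in practice (a sketch; the handler and assertion are illustrative):

    from tornado.testing import AsyncHTTPTestCase
    from tornado.web import Application, RequestHandler

    class HelloHandler(RequestHandler):
        def get(self):
            self.write("hello")

    class HelloTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application([(r"/", HelloHandler)])

        def test_hello(self):
            # Relative paths hit the test server; full http(s):// URLs
            # are now fetched as-is.
            response = self.fetch("/")
            self.assertEqual(response.body, b"hello")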
@@ -423,16 +424,14 @@ class AsyncHTTPTestCase(AsyncTestCase):
    def get_url(self, path):
        """Returns an absolute url for the given path on the test server."""
-        return '%s://localhost:%s%s' % (self.get_protocol(),
+        return '%s://127.0.0.1:%s%s' % (self.get_protocol(),
                                        self.get_http_port(), path)

    def tearDown(self):
        self.http_server.stop()
        self.io_loop.run_sync(self.http_server.close_all_connections,
                              timeout=get_async_test_timeout())
-        if (not IOLoop.initialized() or
-                self.http_client.io_loop is not IOLoop.instance()):
-            self.http_client.close()
+        self.http_client.close()
        super(AsyncHTTPTestCase, self).tearDown()
@@ -442,7 +441,7 @@ class AsyncHTTPSTestCase(AsyncHTTPTestCase):
    Interface is generally the same as `AsyncHTTPTestCase`.
    """
    def get_http_client(self):
-        return AsyncHTTPClient(io_loop=self.io_loop, force_instance=True,
+        return AsyncHTTPClient(force_instance=True,
                               defaults=dict(validate_cert=False))

    def get_httpserver_options(self):
@@ -454,7 +453,8 @@ class AsyncHTTPSTestCase(AsyncHTTPTestCase):
        By default includes a self-signed testing certificate.
        """
        # Testing keys were generated with:
-        # openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509
+        # openssl req -new -keyout tornado/test/test.key \
+        #                     -out tornado/test/test.crt -nodes -days 3650 -x509
        module_dir = os.path.dirname(__file__)
        return dict(
            certfile=os.path.join(module_dir, 'test', 'test.crt'),
@@ -476,7 +476,7 @@ def gen_test(func=None, timeout=None):
        class MyTest(AsyncHTTPTestCase):
            @gen_test
            def test_something(self):
-                response = yield gen.Task(self.fetch('/'))
+                response = yield self.http_client.fetch(self.get_url('/'))

    By default, ``@gen_test`` times out after 5 seconds. The timeout may be
    overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
@@ -485,7 +485,11 @@ def gen_test(func=None, timeout=None):
        class MyTest(AsyncHTTPTestCase):
            @gen_test(timeout=10)
            def test_something_slow(self):
-                response = yield gen.Task(self.fetch('/'))
+                response = yield self.http_client.fetch(self.get_url('/'))
+
+    Note that ``@gen_test`` is incompatible with `AsyncTestCase.stop`,
+    `AsyncTestCase.wait`, and `AsyncHTTPTestCase.fetch`.  Use ``yield
+    self.http_client.fetch(self.get_url())`` as shown above instead.

    .. versionadded:: 3.1
       The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
@@ -494,6 +498,7 @@ def gen_test(func=None, timeout=None):
    .. versionchanged:: 4.0
       The wrapper now passes along ``*args, **kwargs`` so it can be used
       on functions with arguments.
+
    """
    if timeout is None:
        timeout = get_async_test_timeout()
@@ -529,12 +534,17 @@ def gen_test(func=None, timeout=None):
                        timeout=timeout)
                except TimeoutError as e:
                    # run_sync raises an error with an unhelpful traceback.
-                    # Throw it back into the generator or coroutine so the stack
-                    # trace is replaced by the point where the test is stopped.
-                    self._test_generator.throw(e)
-                    # In case the test contains an overly broad except clause,
-                    # we may get back here.  In this case re-raise the original
-                    # exception, which is better than nothing.
+                    # If the underlying generator is still running, we can throw the
+                    # exception back into it so the stack trace is replaced by the
+                    # point where the test is stopped. The only reason the generator
+                    # would not be running would be if it were cancelled, which means
+                    # a native coroutine, so we can rely on the cr_running attribute.
+                    if getattr(self._test_generator, 'cr_running', True):
+                        self._test_generator.throw(e)
+                        # In case the test contains an overly broad except
+                        # clause, we may get back here.
+                    # Coroutine was stopped or didn't raise a useful stack trace,
+                    # so re-raise the original exception which is better than nothing.
                    raise
        return post_coroutine
@@ -554,49 +564,6 @@ def gen_test(func=None, timeout=None):
gen_test.__test__ = False  # type: ignore


-class LogTrapTestCase(unittest.TestCase):
-    """A test case that captures and discards all logging output
-    if the test passes.
-
-    Some libraries can produce a lot of logging output even when
-    the test succeeds, so this class can be useful to minimize the noise.
-    Simply use it as a base class for your test case.  It is safe to combine
-    with AsyncTestCase via multiple inheritance
-    (``class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):``)
-
-    This class assumes that only one log handler is configured and
-    that it is a `~logging.StreamHandler`.  This is true for both
-    `logging.basicConfig` and the "pretty logging" configured by
-    `tornado.options`.  It is not compatible with other log buffering
-    mechanisms, such as those provided by some test runners.
-
-    .. deprecated:: 4.1
-       Use the unittest module's ``--buffer`` option instead, or `.ExpectLog`.
-    """
-    def run(self, result=None):
-        logger = logging.getLogger()
-        if not logger.handlers:
-            logging.basicConfig()
-        handler = logger.handlers[0]
-        if (len(logger.handlers) > 1 or
-                not isinstance(handler, logging.StreamHandler)):
-            # Logging has been configured in a way we don't recognize,
-            # so just leave it alone.
-            super(LogTrapTestCase, self).run(result)
-            return
-        old_stream = handler.stream
-        try:
-            handler.stream = StringIO()
-            gen_log.info("RUNNING TEST: " + str(self))
-            old_error_count = len(result.failures) + len(result.errors)
-            super(LogTrapTestCase, self).run(result)
-            new_error_count = len(result.failures) + len(result.errors)
-            if new_error_count != old_error_count:
-                old_stream.write(handler.stream.getvalue())
-        finally:
-            handler.stream = old_stream
-
-
class ExpectLog(logging.Filter):
    """Context manager to capture and suppress expected log output.
@@ -684,6 +651,12 @@ def main(**kwargs):
    to show many test details as they are run.
    See http://docs.python.org/library/unittest.html#unittest.main
    for full argument list.
+
+    .. versionchanged:: 5.0
+
+       This function produces no output of its own; only that produced
+       by the `unittest` module (Previously it would add a PASS or FAIL
+       log message).
    """
    from tornado.options import define, options, parse_command_line
@@ -719,23 +692,16 @@ def main(**kwargs):
    if __name__ == '__main__' and len(argv) == 1:
        print("No tests specified", file=sys.stderr)
        sys.exit(1)
-    try:
-        # In order to be able to run tests by their fully-qualified name
-        # on the command line without importing all tests here,
-        # module must be set to None.  Python 3.2's unittest.main ignores
-        # defaultTest if no module is given (it tries to do its own
-        # test discovery, which is incompatible with auto2to3), so don't
-        # set module if we're not asking for a specific test.
-        if len(argv) > 1:
-            unittest.main(module=None, argv=argv, **kwargs)
-        else:
-            unittest.main(defaultTest="all", argv=argv, **kwargs)
-    except SystemExit as e:
-        if e.code == 0:
-            gen_log.info('PASS')
-        else:
-            gen_log.error('FAIL')
-        raise
+    # In order to be able to run tests by their fully-qualified name
+    # on the command line without importing all tests here,
+    # module must be set to None.  Python 3.2's unittest.main ignores
+    # defaultTest if no module is given (it tries to do its own
+    # test discovery, which is incompatible with auto2to3), so don't
+    # set module if we're not asking for a specific test.
+    if len(argv) > 1:
+        unittest.main(module=None, argv=argv, **kwargs)
+    else:
+        unittest.main(defaultTest="all", argv=argv, **kwargs)


if __name__ == '__main__':
tornado/util.py

@@ -84,6 +84,16 @@ except ImportError:
    is_finalizing = _get_emulated_is_finalizing()


+class TimeoutError(Exception):
+    """Exception raised by `.with_timeout` and `.IOLoop.run_sync`.
+
+    .. versionchanged:: 5.0:
+       Unified ``tornado.gen.TimeoutError`` and
+       ``tornado.ioloop.TimeoutError`` as ``tornado.util.TimeoutError``.
+       Both former names remain as aliases.
+    """
+
+
class ObjectDict(_ObjectDictBase):
    """Makes a dictionary behave like an object, with attribute-style access.
    """
@@ -272,6 +282,12 @@ class Configurable(object):
    Configurable subclasses must define the class methods
    `configurable_base` and `configurable_default`, and use the instance
    method `initialize` instead of ``__init__``.
+
+    .. versionchanged:: 5.0
+
+       It is now possible for configuration to be specified at
+       multiple levels of a class hierarchy.
+
    """
    __impl_class = None  # type: type
    __impl_kwargs = None  # type: Dict[str, Any]
@@ -286,6 +302,9 @@ class Configurable(object):
        else:
            impl = cls
        init_kwargs.update(kwargs)
+        if impl.configurable_base() is not base:
+            # The impl class is itself configurable, so recurse.
+            return impl(*args, **init_kwargs)
        instance = super(Configurable, cls).__new__(impl)
        # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
        # singleton magic.  If we get rid of that we can switch to __init__
@@ -343,7 +362,10 @@ class Configurable(object):
        # type: () -> type
        """Returns the currently configured class."""
        base = cls.configurable_base()
-        if cls.__impl_class is None:
+        # Manually mangle the private name to see whether this base
+        # has been configured (and not another base higher in the
+        # hierarchy).
+        if base.__dict__.get('_Configurable__impl_class') is None:
            base.__impl_class = cls.configurable_default()
        return base.__impl_class
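A toy illustration of the Configurable machinery the recursion above extends (the class names are invented for the example):

    from tornado.util import Configurable

    class Animal(Configurable):
        @classmethod
        def configurable_base(cls):
            return Animal

        @classmethod
        def configurable_default(cls):
            return Dog

        def initialize(self):
            pass

    class Dog(Animal):
        pass

    # __new__ resolves the configured implementation.
    assert isinstance(Animal(), Dog)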
tornado/web.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@@ -47,12 +46,14 @@ Thread-safety notes
-------------------

In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe.  In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread.  If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
-request.
+request, or to limit your use of other threads to
+`.IOLoop.run_in_executor` and ensure that your callbacks running in
+the executor do not refer to Tornado objects.

"""
@@ -80,7 +81,7 @@ import types
from inspect import isclass
from io import BytesIO

-from tornado.concurrent import Future
+from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado import escape
from tornado import gen
from tornado import httputil
@@ -245,7 +246,7 @@ class RequestHandler(object):
        of the request method.

        Asynchronous support: Decorate this method with `.gen.coroutine`
-        or `.return_future` to make it asynchronous (the
+        or use ``async def`` to make it asynchronous (the
        `asynchronous` decorator cannot be used on `prepare`).
        If this method returns a `.Future` execution will not proceed
        until the `.Future` is done.
@@ -309,20 +310,21 @@ class RequestHandler(object):
    def set_status(self, status_code, reason=None):
        """Sets the status code for our response.

-        :arg int status_code: Response status code. If ``reason`` is ``None``,
-            it must be present in `httplib.responses <http.client.responses>`.
-        :arg string reason: Human-readable reason phrase describing the status
+        :arg int status_code: Response status code.
+        :arg str reason: Human-readable reason phrase describing the status
            code. If ``None``, it will be filled in from
-            `httplib.responses <http.client.responses>`.
+            `http.client.responses` or "Unknown".
+
+        .. versionchanged:: 5.0
+
+           No longer validates that the response code is in
+           `http.client.responses`.
        """
        self._status_code = status_code
        if reason is not None:
            self._reason = escape.native_str(reason)
        else:
-            try:
-                self._reason = httputil.responses[status_code]
-            except KeyError:
-                raise ValueError("unknown status code %d" % status_code)
+            self._reason = httputil.responses.get(status_code, "Unknown")

    def get_status(self):
        """Returns the status code for our response."""
@ -521,18 +523,28 @@ class RequestHandler(object):
return self.request.cookies return self.request.cookies
def get_cookie(self, name, default=None): def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default.""" """Returns the value of the request cookie with the given name.
If the named cookie is not present, returns ``default``.
This method only returns cookies that were present in the request.
It does not see the outgoing cookies set by `set_cookie` in this
handler.
"""
if self.request.cookies is not None and name in self.request.cookies: if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value return self.request.cookies[name].value
return default return default
def set_cookie(self, name, value, domain=None, expires=None, path="/", def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs): expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options. """Sets an outgoing cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel Newly-set cookies are not immediately visible via `get_cookie`;
they are not present until the next request.
Additional keyword arguments are set on the cookies.Morsel
directly. directly.
See https://docs.python.org/2/library/cookie.html#Cookie.Morsel See https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel
for available attributes. for available attributes.
""" """
# The cookie library only accepts type str, in both python 2 and 3 # The cookie library only accepts type str, in both python 2 and 3
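A minimal sketch of the request/response cookie split documented above (handler name hypothetical):

    import tornado.web

    class CookieDemoHandler(tornado.web.RequestHandler):  # hypothetical name
        def get(self):
            self.set_cookie("seen", "yes")
            # Writes "None" on the first visit: set_cookie() only touches the
            # outgoing response, so get_cookie() cannot observe it until the
            # browser returns the cookie on a later request.
            self.write(repr(self.get_cookie("seen")))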
@ -574,6 +586,9 @@ class RequestHandler(object):
path and domain to clear a cookie as were used when that cookie path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side was set (but there is no way to find out on the server side
which values were used for a given cookie). which values were used for a given cookie).
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
""" """
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365) expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires, self.set_cookie(name, value="", path=path, expires=expires,
@ -585,6 +600,9 @@ class RequestHandler(object):
See `clear_cookie` for more information on the path and domain See `clear_cookie` for more information on the path and domain
parameters. parameters.
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
.. versionchanged:: 3.2 .. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters. Added the ``path`` and ``domain`` parameters.
@ -609,6 +627,9 @@ class RequestHandler(object):
Secure cookies may contain arbitrary byte values, not just unicode Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies) strings (unlike regular cookies)
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
.. versionchanged:: 3.2.1 .. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2 Added the ``version`` argument. Introduced cookie version 2
@ -648,6 +669,10 @@ class RequestHandler(object):
The decoded cookie value is returned as a byte string (unlike The decoded cookie value is returned as a byte string (unlike
`get_cookie`). `get_cookie`).
Similar to `get_cookie`, this method only returns cookies that
were present in the request. It does not see outgoing cookies set by
`set_secure_cookie` in this handler.
.. versionchanged:: 3.2.1 .. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2; Added the ``min_version`` argument. Introduced cookie version 2;
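Because the decoded value is a byte string, callers decode it themselves; a sketch under that assumption (handler and cookie name hypothetical):

    import tornado.web

    class ProfileHandler(tornado.web.RequestHandler):  # hypothetical name
        def get(self):
            raw = self.get_secure_cookie("user")  # bytes, or None if absent/invalid
            if raw is None:
                self.redirect("/login")
                return
            self.write("Hello, " + raw.decode("utf-8"))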
@ -709,7 +734,8 @@ class RequestHandler(object):
if not isinstance(chunk, (bytes, unicode_type, dict)): if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects" message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list): if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write" message += ". Lists not accepted for security reasons; see " + \
"http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message) raise TypeError(message)
if isinstance(chunk, dict): if isinstance(chunk, dict):
chunk = escape.json_encode(chunk) chunk = escape.json_encode(chunk)
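A sketch of the accepted input types (handler name hypothetical):

    import tornado.web

    class ApiHandler(tornado.web.RequestHandler):  # hypothetical name
        def get(self):
            self.write({"results": ["a", "b"]})  # dict -> JSON, Content-Type set
            # self.write(["a", "b"])  # TypeError: a top-level JSON list was
            # exploitable in older browsers, so wrap lists in a dict instead.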
@ -974,7 +1000,8 @@ class RequestHandler(object):
if self.check_etag_header(): if self.check_etag_header():
self._write_buffer = [] self._write_buffer = []
self.set_status(304) self.set_status(304)
if self._status_code in (204, 304): if (self._status_code in (204, 304) or
(self._status_code >= 100 and self._status_code < 200)):
assert not self._write_buffer, "Cannot send body with %s" % self._status_code assert not self._write_buffer, "Cannot send body with %s" % self._status_code
self._clear_headers_for_304() self._clear_headers_for_304()
elif "Content-Length" not in self._headers: elif "Content-Length" not in self._headers:
@ -1195,6 +1222,11 @@ class RequestHandler(object):
See http://en.wikipedia.org/wiki/Cross-site_request_forgery See http://en.wikipedia.org/wiki/Cross-site_request_forgery
This property is of type `bytes`, but it contains only ASCII
characters. If a character string is required, there is no
need to base64-encode it; just decode the byte string as
UTF-8.
.. versionchanged:: 3.2.2 .. versionchanged:: 3.2.2
The xsrf token will now have a random mask applied in every The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages request, which makes it safe to include the token in pages
@ -1491,7 +1523,7 @@ class RequestHandler(object):
if self._prepared_future is not None: if self._prepared_future is not None:
# Tell the Application we've finished with prepare() # Tell the Application we've finished with prepare()
# and are ready for the body to arrive. # and are ready for the body to arrive.
self._prepared_future.set_result(None) future_set_result_unless_cancelled(self._prepared_future, None)
if self._finished: if self._finished:
return return
@ -1516,6 +1548,9 @@ class RequestHandler(object):
self._handle_request_exception(e) self._handle_request_exception(e)
except Exception: except Exception:
app_log.error("Exception in exception handler", exc_info=True) app_log.error("Exception in exception handler", exc_info=True)
finally:
# Unset result to avoid circular references
result = None
if (self._prepared_future is not None and if (self._prepared_future is not None and
not self._prepared_future.done()): not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it # In case we failed before setting _prepared_future, do it
@ -1561,11 +1596,7 @@ class RequestHandler(object):
# send a response. # send a response.
return return
if isinstance(e, HTTPError): if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason: self.send_error(e.status_code, exc_info=sys.exc_info())
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else: else:
self.send_error(500, exc_info=sys.exc_info()) self.send_error(500, exc_info=sys.exc_info())
@ -1711,7 +1742,7 @@ def stream_request_body(cls):
See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_ See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
for example usage. for example usage.
""" """ # noqa: E501
if not issubclass(cls, RequestHandler): if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls) raise TypeError("expected subclass of RequestHandler, got %r", cls)
cls._stream_request_body = True cls._stream_request_body = True
@ -1859,6 +1890,17 @@ class Application(ReversibleRouter):
If there's no match for the current request's host, then ``default_host`` If there's no match for the current request's host, then ``default_host``
parameter value is matched against host regular expressions. parameter value is matched against host regular expressions.
.. warning::
Applications that do not use TLS may be vulnerable to :ref:`DNS
rebinding <dnsrebinding>` attacks. This attack is especially
relevant to applications that only listen on ``127.0.0.1`` or
other private networks. Appropriate host patterns must be used
(instead of the default of ``r'.*'``) to prevent this risk. The
``default_host`` argument must not be used in applications that
may be vulnerable to DNS rebinding.
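A sketch of the recommended host restriction (handler and host list hypothetical):

    import tornado.web

    class MainHandler(tornado.web.RequestHandler):  # hypothetical name
        def get(self):
            self.write("ok")

    app = tornado.web.Application()
    # Match only the hosts actually served, instead of the default r'.*',
    # so a request with a rebound Host header gets a 404 instead of a handler.
    app.add_handlers(r'(localhost|127\.0\.0\.1)', [(r'/', MainHandler)])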
You can serve static files by sending the ``static_path`` setting You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the ``/static/`` URI (this is configurable with the
@ -1869,6 +1911,7 @@ class Application(ReversibleRouter):
.. versionchanged:: 4.5 .. versionchanged:: 4.5
Integration with the new `tornado.routing` module. Integration with the new `tornado.routing` module.
""" """
def __init__(self, handlers=None, default_host=None, transforms=None, def __init__(self, handlers=None, default_host=None, transforms=None,
**settings): **settings):
@ -2089,7 +2132,7 @@ class _HandlerDelegate(httputil.HTTPMessageDelegate):
def finish(self): def finish(self):
if self.stream_request_body: if self.stream_request_body:
self.request.body.set_result(None) future_set_result_unless_cancelled(self.request.body, None)
else: else:
self.request.body = b''.join(self.chunks) self.request.body = b''.join(self.chunks)
self.request._parse_body() self.request._parse_body()
@ -2146,11 +2189,11 @@ class HTTPError(Exception):
:arg int status_code: HTTP status code. Must be listed in :arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason`` `httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given. keyword argument is given.
:arg string log_message: Message to be written to the log for this error :arg str log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug (will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters. in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase :arg str reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally to pass in the status line along with ``status_code``. Normally
determined automatically from ``status_code``, but can be used determined automatically from ``status_code``, but can be used
to use a non-standard numeric code. to use a non-standard numeric code.
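A sketch of the non-standard-code case (handler name and values hypothetical):

    import tornado.web

    class UpstreamHandler(tornado.web.RequestHandler):  # hypothetical name
        def get(self):
            # log_message takes %s-style placeholders; reason is required
            # here because 599 is not in http.client.responses.
            raise tornado.web.HTTPError(599, "upstream timed out after %ds",
                                        30, reason="Network Connect Timeout")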
@ -2256,13 +2299,21 @@ class RedirectHandler(RequestHandler):
.. versionchanged:: 4.5 .. versionchanged:: 4.5
Added support for substitutions into the destination URL. Added support for substitutions into the destination URL.
.. versionchanged:: 5.0
If any query arguments are present, they will be copied to the
destination URL.
""" """
def initialize(self, url, permanent=True): def initialize(self, url, permanent=True):
self._url = url self._url = url
self._permanent = permanent self._permanent = permanent
def get(self, *args): def get(self, *args):
self.redirect(self._url.format(*args), permanent=self._permanent) to_url = self._url.format(*args)
if self.request.query_arguments:
to_url = httputil.url_concat(
to_url, list(httputil.qs_to_qsl(self.request.query_arguments)))
self.redirect(to_url, permanent=self._permanent)
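A sketch of the 5.0 behavior (URLs hypothetical):

    import tornado.web

    app = tornado.web.Application([
        # As of 5.0, GET /old?page=2 redirects to /new?page=2;
        # previously the query string was dropped.
        (r'/old', tornado.web.RedirectHandler, {'url': '/new'}),
    ])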
class StaticFileHandler(RequestHandler): class StaticFileHandler(RequestHandler):
@ -2467,8 +2518,9 @@ class StaticFileHandler(RequestHandler):
.. versionadded:: 3.1 .. versionadded:: 3.1
""" """
if self.check_etag_header(): # If client sent If-None-Match, use it, ignore If-Modified-Since
return True if self.request.headers.get('If-None-Match'):
return self.check_etag_header()
# Check the If-Modified-Since, and don't send the result if the # Check the If-Modified-Since, and don't send the result if the
# content has not been modified # content has not been modified
@ -2786,7 +2838,7 @@ class OutputTransform(object):
pass pass
def transform_first_chunk(self, status_code, headers, chunk, finishing): def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501
return status_code, headers, chunk return status_code, headers, chunk
def transform_chunk(self, chunk, finishing): def transform_chunk(self, chunk, finishing):
@ -2827,7 +2879,7 @@ class GZipContentEncoding(OutputTransform):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing): def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501
# TODO: can/should this type be inherited from the superclass? # TODO: can/should this type be inherited from the superclass?
if 'Vary' in headers: if 'Vary' in headers:
headers['Vary'] += ', Accept-Encoding' headers['Vary'] += ', Accept-Encoding'

View file

@ -17,7 +17,6 @@ the protocol (known as "draft 76") and are not compatible with this module.
""" """
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
# Author: Jacob Kristhammar, 2010
import base64 import base64
import collections import collections
@ -28,7 +27,7 @@ import tornado.escape
import tornado.web import tornado.web
import zlib import zlib
from tornado.concurrent import TracebackFuture from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str, to_unicode from tornado.escape import utf8, native_str, to_unicode
from tornado import gen, httpclient, httputil from tornado import gen, httpclient, httputil
from tornado.ioloop import IOLoop, PeriodicCallback from tornado.ioloop import IOLoop, PeriodicCallback
@ -237,6 +236,7 @@ class WebSocketHandler(tornado.web.RequestHandler):
is allowed. is allowed.
If the connection is already closed, raises `WebSocketClosedError`. If the connection is already closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 3.2 .. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection `WebSocketClosedError` was added (previously a closed connection
@ -244,6 +244,10 @@ class WebSocketHandler(tornado.web.RequestHandler):
.. versionchanged:: 4.3 .. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control. Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Consistently raises `WebSocketClosedError`. Previously could
sometimes raise `.StreamClosedError`.
""" """
if self.ws_connection is None: if self.ws_connection is None:
raise WebSocketClosedError() raise WebSocketClosedError()
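A sketch of the flow control plus the 5.0 error contract (handler name hypothetical):

    from tornado import gen, websocket

    class EchoHandler(websocket.WebSocketHandler):  # hypothetical name
        @gen.coroutine
        def on_message(self, message):
            try:
                # Yielding the returned Future applies backpressure: the next
                # message is not processed until this write has flushed.
                yield self.write_message(message)
            except websocket.WebSocketClosedError:
                pass  # peer vanished mid-write; 5.0 raises this consistently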
@ -308,8 +312,23 @@ class WebSocketHandler(tornado.web.RequestHandler):
""" """
raise NotImplementedError raise NotImplementedError
def ping(self, data): def ping(self, data=b''):
"""Send ping frame to the remote end.""" """Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``websocket_ping_interval`` application
setting instead of sending pings manually.
.. versionchanged:: 5.1
The data argument is now optional.
"""
data = utf8(data)
if self.ws_connection is None: if self.ws_connection is None:
raise WebSocketClosedError() raise WebSocketClosedError()
self.ws_connection.write_ping(data) self.ws_connection.write_ping(data)
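A sketch of the suggested setting in place of manual pings (handler name and intervals hypothetical):

    import tornado.web
    from tornado import websocket

    class PingedHandler(websocket.WebSocketHandler):  # hypothetical name
        def on_message(self, message):
            self.write_message(message)

    app = tornado.web.Application(
        [(r'/ws', PingedHandler)],
        websocket_ping_interval=30,  # ping every 30 seconds
        websocket_ping_timeout=90,   # close if no pong within 90 seconds
    )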
@ -539,7 +558,8 @@ class _PerMessageDeflateCompressor(object):
self._compressor = None self._compressor = None
def _create_compressor(self): def _create_compressor(self):
return zlib.compressobj(self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level) return zlib.compressobj(self._compression_level,
zlib.DEFLATED, -self._max_wbits, self._mem_level)
def compress(self, data): def compress(self, data):
compressor = self._compressor or self._create_compressor() compressor = self._compressor or self._create_compressor()
@ -616,6 +636,14 @@ class WebSocketProtocol13(WebSocketProtocol):
def accept_connection(self): def accept_connection(self):
try: try:
self._handle_websocket_headers() self._handle_websocket_headers()
except ValueError:
self.handler.set_status(400)
log_msg = "Missing/Invalid WebSocket headers"
self.handler.finish(log_msg)
gen_log.debug(log_msg)
return
try:
self._accept_connection() self._accept_connection()
except ValueError: except ValueError:
gen_log.debug("Malformed WebSocket request received", gen_log.debug("Malformed WebSocket request received",
@ -648,8 +676,7 @@ class WebSocketProtocol13(WebSocketProtocol):
self.request.headers.get("Sec-Websocket-Key")) self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self): def _accept_connection(self):
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '') subprotocols = [s.strip() for s in self.request.headers.get_list("Sec-WebSocket-Protocol")]
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols: if subprotocols:
selected = self.handler.select_subprotocol(subprotocols) selected = self.handler.select_subprotocol(subprotocols)
if selected: if selected:
@ -743,31 +770,35 @@ class WebSocketProtocol13(WebSocketProtocol):
**self._get_compressor_options(other_side, agreed_parameters, compression_options)) **self._get_compressor_options(other_side, agreed_parameters, compression_options))
def _write_frame(self, fin, opcode, data, flags=0): def _write_frame(self, fin, opcode, data, flags=0):
data_len = len(data)
if opcode & 0x8:
# All control frames MUST have a payload length of 125
# bytes or less and MUST NOT be fragmented.
if not fin:
raise ValueError("control frames may not be fragmented")
if data_len > 125:
raise ValueError("control frame payloads may not exceed 125 bytes")
if fin: if fin:
finbit = self.FIN finbit = self.FIN
else: else:
finbit = 0 finbit = 0
frame = struct.pack("B", finbit | opcode | flags) frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing: if self.mask_outgoing:
mask_bit = 0x80 mask_bit = 0x80
else: else:
mask_bit = 0 mask_bit = 0
if l < 126: if data_len < 126:
frame += struct.pack("B", l | mask_bit) frame += struct.pack("B", data_len | mask_bit)
elif l <= 0xFFFF: elif data_len <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l) frame += struct.pack("!BH", 126 | mask_bit, data_len)
else: else:
frame += struct.pack("!BQ", 127 | mask_bit, l) frame += struct.pack("!BQ", 127 | mask_bit, data_len)
if self.mask_outgoing: if self.mask_outgoing:
mask = os.urandom(4) mask = os.urandom(4)
data = mask + _websocket_mask(mask, data) data = mask + _websocket_mask(mask, data)
frame += data frame += data
self._wire_bytes_out += len(frame) self._wire_bytes_out += len(frame)
try: return self.stream.write(frame)
return self.stream.write(frame)
except StreamClosedError:
self._abort()
def write_message(self, message, binary=False): def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.""" """Sends the given message to the client of this Web Socket."""
@ -782,7 +813,23 @@ class WebSocketProtocol13(WebSocketProtocol):
if self._compressor: if self._compressor:
message = self._compressor.compress(message) message = self._compressor.compress(message)
flags |= self.RSV1 flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags) # For historical reasons, write methods in Tornado operate in a semi-synchronous
# mode in which awaiting the Future they return is optional (but errors can
# still be raised). This requires us to go through an awkward dance here
# to transform the errors that may be returned while presenting the same
# semi-synchronous interface.
try:
fut = self._write_frame(True, opcode, message, flags=flags)
except StreamClosedError:
raise WebSocketClosedError()
@gen.coroutine
def wrapper():
try:
yield fut
except StreamClosedError:
raise WebSocketClosedError()
return wrapper()
def write_ping(self, data): def write_ping(self, data):
"""Send ping frame.""" """Send ping frame."""
@ -951,7 +998,10 @@ class WebSocketProtocol13(WebSocketProtocol):
self.close(self.handler.close_code) self.close(self.handler.close_code)
elif opcode == 0x9: elif opcode == 0x9:
# Ping # Ping
self._write_frame(True, 0xA, data) try:
self._write_frame(True, 0xA, data)
except StreamClosedError:
self._abort()
self._run_callback(self.handler.on_ping, data) self._run_callback(self.handler.on_ping, data)
elif opcode == 0xA: elif opcode == 0xA:
# Pong # Pong
@ -972,7 +1022,10 @@ class WebSocketProtocol13(WebSocketProtocol):
close_data = struct.pack('>H', code) close_data = struct.pack('>H', code)
if reason is not None: if reason is not None:
close_data += utf8(reason) close_data += utf8(reason)
self._write_frame(True, 0x8, close_data) try:
self._write_frame(True, 0x8, close_data)
except StreamClosedError:
self._abort()
self.server_terminated = True self.server_terminated = True
if self.client_terminated: if self.client_terminated:
if self._waiting is not None: if self._waiting is not None:
@ -1037,11 +1090,11 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
This class should not be instantiated directly; use the This class should not be instantiated directly; use the
`websocket_connect` function instead. `websocket_connect` function instead.
""" """
def __init__(self, io_loop, request, on_message_callback=None, def __init__(self, request, on_message_callback=None,
compression_options=None, ping_interval=None, ping_timeout=None, compression_options=None, ping_interval=None, ping_timeout=None,
max_message_size=None): max_message_size=None):
self.compression_options = compression_options self.compression_options = compression_options
self.connect_future = TracebackFuture() self.connect_future = Future()
self.protocol = None self.protocol = None
self.read_future = None self.read_future = None
self.read_queue = collections.deque() self.read_queue = collections.deque()
@ -1070,9 +1123,9 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
request.headers['Sec-WebSocket-Extensions'] = ( request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits') 'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop) self.tcp_client = TCPClient()
super(WebSocketClientConnection, self).__init__( super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600) 104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None): def close(self, code=None, reason=None):
@ -1129,11 +1182,19 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
# ability to see exceptions. # ability to see exceptions.
self.final_callback = None self.final_callback = None
self.connect_future.set_result(self) future_set_result_unless_cancelled(self.connect_future, self)
def write_message(self, message, binary=False): def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server.""" """Sends a message to the WebSocket server.
return self.protocol.write_message(message, binary)
If the stream is closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Exception raised on a closed stream changed from `.StreamClosedError`
to `WebSocketClosedError`.
"""
return self.protocol.write_message(message, binary=binary)
def read_message(self, callback=None): def read_message(self, callback=None):
"""Reads a message from the WebSocket server. """Reads a message from the WebSocket server.
@ -1147,9 +1208,9 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
ready. ready.
""" """
assert self.read_future is None assert self.read_future is None
future = TracebackFuture() future = Future()
if self.read_queue: if self.read_queue:
future.set_result(self.read_queue.popleft()) future_set_result_unless_cancelled(future, self.read_queue.popleft())
else: else:
self.read_future = future self.read_future = future
if callback is not None: if callback is not None:
@ -1160,11 +1221,30 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
if self._on_message_callback: if self._on_message_callback:
self._on_message_callback(message) self._on_message_callback(message)
elif self.read_future is not None: elif self.read_future is not None:
self.read_future.set_result(message) future_set_result_unless_cancelled(self.read_future, message)
self.read_future = None self.read_future = None
else: else:
self.read_queue.append(message) self.read_queue.append(message)
def ping(self, data=b''):
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``ping_interval`` argument to
`websocket_connect` instead of sending pings manually.
.. versionadded:: 5.1
"""
data = utf8(data)
if self.protocol is None:
raise WebSocketClosedError()
self.protocol.write_ping(data)
def on_pong(self, data): def on_pong(self, data):
pass pass
@ -1176,7 +1256,7 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
compression_options=self.compression_options) compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None, def websocket_connect(url, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None, on_message_callback=None, compression_options=None,
ping_interval=None, ping_timeout=None, ping_interval=None, ping_timeout=None,
max_message_size=None): max_message_size=None):
@ -1207,14 +1287,14 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
.. versionchanged:: 4.1 .. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``. Added ``compression_options`` and ``on_message_callback``.
The ``io_loop`` argument is deprecated.
.. versionchanged:: 4.5 .. versionchanged:: 4.5
Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size`` Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
arguments, which have the same meaning as in `WebSocketHandler`. arguments, which have the same meaning as in `WebSocketHandler`.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
""" """
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest): if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None assert connect_timeout is None
request = url request = url
@ -1225,12 +1305,12 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout) request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy( request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS) request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request, conn = WebSocketClientConnection(request,
on_message_callback=on_message_callback, on_message_callback=on_message_callback,
compression_options=compression_options, compression_options=compression_options,
ping_interval=ping_interval, ping_interval=ping_interval,
ping_timeout=ping_timeout, ping_timeout=ping_timeout,
max_message_size=max_message_size) max_message_size=max_message_size)
if callback is not None: if callback is not None:
io_loop.add_future(conn.connect_future, callback) IOLoop.current().add_future(conn.connect_future, callback)
return conn.connect_future return conn.connect_future
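A client sketch against the 5.0 signature (URL and values hypothetical):

    from tornado import gen, ioloop, websocket

    @gen.coroutine
    def main():
        # No io_loop argument in 5.0; the current loop is used implicitly.
        conn = yield websocket.websocket_connect('ws://localhost:8888/ws',
                                                 ping_interval=30)
        yield conn.write_message('hello')
        msg = yield conn.read_message()  # resolves to None once closed
        print(msg)

    ioloop.IOLoop.current().run_sync(main)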

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
# #
# Copyright 2009 Facebook # Copyright 2009 Facebook
# #

View file

@ -13,12 +13,12 @@ from tornado.ioloop import IOLoop
class WebServer(threading.Thread): class WebServer(threading.Thread):
def __init__(self, options={}, io_loop=None): def __init__(self, options={}, **kwargs):
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.daemon = True self.daemon = True
self.alive = True self.alive = True
self.name = 'TORNADO' self.name = 'TORNADO'
self.io_loop = io_loop or IOLoop.current() self.io_loop = None
self.server = None self.server = None
self.options = options self.options = options
@ -55,7 +55,7 @@ class WebServer(threading.Thread):
# Load the app # Load the app
self.app = Application([], self.app = Application([],
debug=True, debug=False,
autoreload=False, autoreload=False,
gzip=True, gzip=True,
cookie_secret=sickbeard.COOKIE_SECRET, cookie_secret=sickbeard.COOKIE_SECRET,
@ -144,6 +144,8 @@ class WebServer(threading.Thread):
logger.ERROR) logger.ERROR)
return return
self.io_loop = IOLoop.current()
try: try:
self.io_loop.start() self.io_loop.start()
self.io_loop.close(True) self.io_loop.close(True)
@ -153,4 +155,5 @@ class WebServer(threading.Thread):
def shutDown(self): def shutDown(self):
self.alive = False self.alive = False
self.io_loop.stop() if None is not self.io_loop:
self.io_loop.stop()
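A minimal sketch of the same thread/loop pattern, assuming Tornado 5 semantics (class name hypothetical; add_callback is used here as the thread-safe way to stop the loop):

    import threading
    from tornado.ioloop import IOLoop

    class LoopThread(threading.Thread):  # hypothetical name
        def __init__(self):
            threading.Thread.__init__(self)
            self.io_loop = None  # created inside the thread, not by the caller

        def run(self):
            # Tornado 5 dropped most io_loop= arguments; the serving thread
            # asks for its own current loop instead of receiving one.
            self.io_loop = IOLoop.current()
            self.io_loop.start()

        def shut_down(self):
            if self.io_loop is not None:
                # add_callback is the thread-safe way to reach the loop
                self.io_loop.add_callback(self.io_loop.stop)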