Update Tornado Web Server 4.5.1 (79b2683) → 5.0.1 (35a538f).

Prinz23 2018-03-27 17:13:58 +01:00 committed by JackDandy
parent 11b05e3699
commit e8ade6ffcf
46 changed files with 2165 additions and 1173 deletions


@ -18,6 +18,7 @@
* Update scandir 1.3 to 1.6 (c3592ee)
* Update SimpleJSON library 3.10.0 (c52efea) to 3.13.2 (6ffddbe)
* Update Six compatibility library 1.10.0 (r433) to 1.11.0 (68112f3)
* Update Tornado Web Server 4.5.1 (79b2683) to 5.0.1 (35a538f)
[develop changelog]


@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -25,5 +24,5 @@ from __future__ import absolute_import, division, print_function
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
version = "4.5.1"
version_info = (4, 5, 1, 0)
version = "5.1.dev1"
version_info = (5, 1, 0, -100)


@ -1,5 +1,4 @@
#!/usr/bin/env python
# coding: utf-8
# -*- coding: utf-8 -*-
#
# Copyright 2012 Facebook
#


@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -74,8 +73,11 @@ import hashlib
import hmac
import time
import uuid
import warnings
from tornado.concurrent import TracebackFuture, return_future, chain_future
from tornado.concurrent import (Future, return_future, chain_future,
future_set_exc_info,
future_set_result_unless_cancelled)
from tornado import gen
from tornado import httpclient
from tornado import escape
@ -112,14 +114,19 @@ def _auth_return_future(f):
Note that when using this decorator the ``callback`` parameter
inside the function will actually be a future.
.. deprecated:: 5.1
Will be removed in 6.0.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = TracebackFuture()
future = Future()
callback, args, kwargs = replacer.replace(future, args, kwargs)
if callback is not None:
warnings.warn("callback arguments are deprecated, use the returned Future instead",
DeprecationWarning)
future.add_done_callback(
functools.partial(_auth_future_to_callback, callback))
@ -127,7 +134,7 @@ def _auth_return_future(f):
if future.done():
return False
else:
future.set_exc_info((typ, value, tb))
future_set_exc_info(future, (typ, value, tb))
return True
with ExceptionStackContext(handle_exception):
f(*args, **kwargs)
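For callers, the practical effect of this decorator change is that auth methods now return a Future and warn when a callback is passed. A hedged sketch of the preferred calling style (handler name and redirect URI are illustrative, not from the diff):

import tornado.auth
import tornado.gen
import tornado.web

class LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin):
    @tornado.gen.coroutine
    def get(self):
        # consume the returned Future; passing callback= now emits
        # DeprecationWarning
        user = yield self.get_authenticated_user(
            redirect_uri='http://localhost/auth/google',
            code=self.get_argument('code'))
        self.write(user)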
@ -161,6 +168,11 @@ class OpenIdMixin(object):
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
.. deprecated:: 5.1
The ``callback`` argument and returned awaitable will be removed
in Tornado 6.0; this will be an ordinary synchronous function.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
@ -178,6 +190,11 @@ class OpenIdMixin(object):
is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items())
@ -295,7 +312,7 @@ class OpenIdMixin(object):
claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
future.set_result(user)
future_set_result_unless_cancelled(future, user)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
@ -328,25 +345,29 @@ class OAuthMixin(object):
"""Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For
some services (including Friendfeed), you must use a
previously-registered callback URI and cannot specify a
callback via this method.
registered a callback URI with the third-party service. For
some services, you must use a previously-registered callback
URI and cannot specify a callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for
security purposes.
Note that this method is asynchronous, although it calls
`.RequestHandler.finish` for you so it may not be necessary
to pass a callback or use the `.Future` it returns. However,
if this method is called from a function decorated with
`.gen.coroutine`, you must call it with ``yield`` to keep the
response from being closed prematurely.
This method is asynchronous and must be called with ``await``
or ``yield`` (This is different from other ``auth*_redirect``
methods defined in this module). It calls
`.RequestHandler.finish` for you so you should not write any
other response after it returns.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
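The docstring rewrite above encodes a real behavioral requirement: in 5.x, ``authorize_redirect`` finishes the response itself, so it must be awaited or yielded. A minimal sketch using TwitterMixin (the handler wiring is an assumption, not from the diff):

import tornado.auth
import tornado.gen
import tornado.web

class TwitterLoginHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin):
    @tornado.gen.coroutine
    def get(self):
        if self.get_argument('oauth_token', None):
            user = yield self.get_authenticated_user()
            self.finish(user)
        else:
            # must be yielded/awaited; it calls self.finish() internally
            yield self.authorize_redirect()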
@ -380,6 +401,11 @@ class OAuthMixin(object):
requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service
used.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
future = callback
request_key = escape.utf8(self.get_argument("oauth_token"))
@ -390,7 +416,8 @@ class OAuthMixin(object):
"Missing OAuth request token cookie"))
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
cookie_key, cookie_secret = [
base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
future.set_exception(AuthError(
"Request token does not match cookie"))
@ -477,7 +504,9 @@ class OAuthMixin(object):
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback(
fut = self._oauth_get_user_future(access_token)
fut = gen.convert_yielded(fut)
fut.add_done_callback(
functools.partial(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self):
@ -502,7 +531,18 @@ class OAuthMixin(object):
For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported.
.. versionchanged:: 5.1
Subclasses may also define this method with ``async def``.
.. deprecated:: 5.1
The ``_oauth_get_user`` fallback is deprecated and support for it
will be removed in 6.0.
"""
warnings.warn("_oauth_get_user is deprecated, override _oauth_get_user_future instead",
DeprecationWarning)
# By default, call the old-style _oauth_get_user, but new code
# should override this method instead.
self._oauth_get_user(access_token, callback)
@ -519,7 +559,7 @@ class OAuthMixin(object):
future.set_exception(AuthError("Error getting user"))
return
user["access_token"] = access_token
future.set_result(user)
future_set_result_unless_cancelled(future, user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
@ -586,6 +626,11 @@ class OAuth2Mixin(object):
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
.. deprecated:: 5.1
The ``callback`` argument and returned awaitable will be removed
in Tornado 6.0; this will be an ordinary synchronous function.
"""
args = {
"redirect_uri": redirect_uri,
@ -646,6 +691,11 @@ class OAuth2Mixin(object):
:hide:
.. versionadded:: 4.3
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
all_args = {}
if access_token:
@ -668,7 +718,7 @@ class OAuth2Mixin(object):
(response.error, response.request.url)))
return
future.set_result(escape.json_decode(response.body))
future_set_result_unless_cancelled(future, escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
@ -732,6 +782,11 @@ class TwitterMixin(OAuthMixin):
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
@ -779,6 +834,10 @@ class TwitterMixin(OAuthMixin):
.. testoutput::
:hide:
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the
@ -811,7 +870,7 @@ class TwitterMixin(OAuthMixin):
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
future_set_result_unless_cancelled(future, escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
@ -848,8 +907,8 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
.. versionadded:: 3.2
"""
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/v2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://www.googleapis.com/oauth2/v4/token"
_OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
_OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = 'google_oauth'
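Only the Google endpoints change here; the settings plumbing via ``_OAUTH_SETTINGS_KEY`` stays the same. A sketch of the expected application settings, with placeholder credentials and a hypothetical handler class:

import tornado.web
from tornado.auth import GoogleOAuth2Mixin

class GoogleLoginHandler(tornado.web.RequestHandler, GoogleOAuth2Mixin):
    pass  # see the get_authenticated_user sketch earlier in this diff

app = tornado.web.Application(
    [(r'/auth/google', GoogleLoginHandler)],
    google_oauth={'key': 'CLIENT_ID', 'secret': 'CLIENT_SECRET'})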
@ -894,7 +953,11 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
.. testoutput::
:hide:
"""
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
""" # noqa: E501
http = self.get_auth_http_client()
body = urllib_parse.urlencode({
"redirect_uri": redirect_uri,
@ -906,7 +969,9 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
functools.partial(self._on_access_token, callback),
method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
method="POST",
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body=body)
def _on_access_token(self, future, response):
"""Callback function for the exchange to the access token."""
@ -915,7 +980,7 @@ class GoogleOAuth2Mixin(OAuth2Mixin):
return
args = escape.json_decode(response.body)
future.set_result(args)
future_set_result_unless_cancelled(future, args)
class FacebookGraphMixin(OAuth2Mixin):
@ -963,11 +1028,17 @@ class FacebookGraphMixin(OAuth2Mixin):
Tornado it will change from a string to an integer.
* ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``,
``link``, plus any fields named in the ``extra_fields`` argument. These
fields are copied from the Facebook graph API `user object <https://developers.facebook.com/docs/graph-api/reference/user>`_
fields are copied from the Facebook graph API
`user object <https://developers.facebook.com/docs/graph-api/reference/user>`_
.. versionchanged:: 4.5
The ``session_expires`` field was updated to support changes made to the
Facebook API in March 2017.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
http = self.get_auth_http_client()
args = {
@ -986,6 +1057,7 @@ class FacebookGraphMixin(OAuth2Mixin):
functools.partial(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
@gen.coroutine
def _on_access_token(self, redirect_uri, client_id, client_secret,
future, fields, response):
if response.error:
@ -998,10 +1070,8 @@ class FacebookGraphMixin(OAuth2Mixin):
"expires_in": args.get("expires_in")
}
self.facebook_request(
user = yield self.facebook_request(
path="/me",
callback=functools.partial(
self._on_get_user_info, future, session, fields),
access_token=session["access_token"],
appsecret_proof=hmac.new(key=client_secret.encode('utf8'),
msg=session["access_token"].encode('utf8'),
@ -1009,9 +1079,8 @@ class FacebookGraphMixin(OAuth2Mixin):
fields=",".join(fields)
)
def _on_get_user_info(self, future, session, fields, user):
if user is None:
future.set_result(None)
future_set_result_unless_cancelled(future, None)
return
fieldmap = {}
@ -1024,7 +1093,7 @@ class FacebookGraphMixin(OAuth2Mixin):
# This should change in Tornado 5.0.
fieldmap.update({"access_token": session["access_token"],
"session_expires": str(session.get("expires_in"))})
future.set_result(fieldmap)
future_set_result_unless_cancelled(future, fieldmap)
@_auth_return_future
def facebook_request(self, path, callback, access_token=None,
@ -1045,7 +1114,7 @@ class FacebookGraphMixin(OAuth2Mixin):
Example usage:
..testcode::
.. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@ -1075,6 +1144,11 @@ class FacebookGraphMixin(OAuth2Mixin):
.. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
url = self._FACEBOOK_BASE_URL + path
# Thanks to the _auth_return_future decorator, our "callback"


@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -63,12 +62,11 @@ import sys
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by setting the $PYTHONPATH environment
# variable before re-execution so the new process will see the correct
# path. We attempt to address the latter problem when tornado.autoreload
# is run as __main__, although we can't fix the general case because
# we cannot reliably reconstruct the original command line
# (http://bugs.python.org/issue14208).
# We address the former problem by reconstructing the original command
# line (Python >= 3.4) or by setting the $PYTHONPATH environment
# variable (Python < 3.4) before re-execution so the new process will
# see the correct path. We attempt to address the latter problem when
# tornado.autoreload is run as __main__.
if __name__ == "__main__":
# This sys.path manipulation must come before our imports (as much
@ -111,13 +109,13 @@ _reload_attempted = False
_io_loops = weakref.WeakKeyDictionary() # type: ignore
def start(io_loop=None, check_time=500):
def start(check_time=500):
"""Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
io_loop = io_loop or ioloop.IOLoop.current()
io_loop = ioloop.IOLoop.current()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
@ -125,7 +123,7 @@ def start(io_loop=None, check_time=500):
gen_log.warning("tornado.autoreload started more than once in the same process")
modify_times = {}
callback = functools.partial(_reload_on_update, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop)
scheduler = ioloop.PeriodicCallback(callback, check_time)
scheduler.start()
@ -137,7 +135,7 @@ def wait():
the command-line interface in `main`)
"""
io_loop = ioloop.IOLoop()
start(io_loop)
io_loop.add_callback(start)
io_loop.start()
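With the ``io_loop`` parameter removed, ``start()`` always binds to the current loop. A minimal 5.0-style usage sketch:

from tornado import autoreload, ioloop

autoreload.start()             # was autoreload.start(io_loop) before 5.0
ioloop.IOLoop.current().start()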
@ -209,21 +207,29 @@ def _reload():
# ioloop.set_blocking_log_threshold so it doesn't fire
# after the exec.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
# sys.path fixes: see comments at top of file. If sys.path[0] is an empty
# string, we were (probably) invoked with -m and the effective path
# is about to change on re-exec. Add the current directory to $PYTHONPATH
# to ensure that the new process sees the same path we did.
path_prefix = '.' + os.pathsep
if (sys.path[0] == '' and
not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
os.environ["PYTHONPATH"] = (path_prefix +
os.environ.get("PYTHONPATH", ""))
# sys.path fixes: see comments at top of file. If __main__.__spec__
# exists, we were invoked with -m and the effective path is about to
# change on re-exec. Reconstruct the original command line to
# ensure that the new process sees the same path we did. If
# __spec__ is not available (Python < 3.4), check instead if
# sys.path[0] is an empty string and add the current directory to
# $PYTHONPATH.
spec = getattr(sys.modules['__main__'], '__spec__', None)
if spec:
argv = ['-m', spec.name] + sys.argv[1:]
else:
argv = sys.argv
path_prefix = '.' + os.pathsep
if (sys.path[0] == '' and
not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
os.environ["PYTHONPATH"] = (path_prefix +
os.environ.get("PYTHONPATH", ""))
if not _has_execv:
subprocess.Popen([sys.executable] + sys.argv)
subprocess.Popen([sys.executable] + argv)
sys.exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + sys.argv)
os.execv(sys.executable, [sys.executable] + argv)
except OSError:
# Mac OS X versions prior to 10.6 do not support execv in
# a process that contains multiple threads. Instead of
@ -236,8 +242,7 @@ def _reload():
# Unfortunately the errno returned in this case does not
# appear to be consistent, so we can't easily check for
# this error specifically.
os.spawnv(os.P_NOWAIT, sys.executable,
[sys.executable] + sys.argv)
os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + argv)
# At this point the IOLoop has been closed and finally
# blocks will experience errors if we allow the stack to
# unwind, so just exit uncleanly.


@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
@ -13,13 +12,19 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
"""Utilities for working with ``Future`` objects.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package. This package defines
a mostly-compatible `Future` class designed for use from coroutines,
as well as some utility functions for interacting with the
`concurrent.futures` package.
Python 3.2 in the `concurrent.futures` package, and also adopted (in a
slightly different form) in Python 3.4's `asyncio` package. This
package defines a ``Future`` class that is an alias for `asyncio.Future`
when available, and a compatible implementation for older versions of
Python. It also includes some utility functions for interacting with
``Future`` objects.
While this package is an important part of Tornado's internal
implementation, applications rarely need to interact with it
directly.
"""
from __future__ import absolute_import, division, print_function
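A quick check of the aliasing the new docstring describes, assuming Python 3.4+ where asyncio imports successfully:

import asyncio
from tornado.concurrent import Future

assert Future is asyncio.Future  # on Python 2, a compatible class is used instead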
@ -28,6 +33,7 @@ import platform
import textwrap
import traceback
import sys
import warnings
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap
@ -38,6 +44,11 @@ try:
except ImportError:
futures = None
try:
import asyncio
except ImportError:
asyncio = None
try:
import typing
except ImportError:
@ -138,16 +149,17 @@ class Future(object):
Tornado they are normally used with `.IOLoop.add_future` or by
yielding them in a `.gen.coroutine`.
`tornado.concurrent.Future` is similar to
`concurrent.futures.Future`, but not thread-safe (and therefore
faster for use with single-threaded event loops).
`tornado.concurrent.Future` is an alias for `asyncio.Future` when
that package is available (Python 3.4+). Unlike
`concurrent.futures.Future`, the ``Futures`` used by Tornado and
`asyncio` are not thread-safe (and therefore faster for use with
single-threaded event loops).
In addition to ``exception`` and ``set_exception``, methods ``exc_info``
and ``set_exc_info`` are supported to capture tracebacks in Python 2.
The traceback is automatically available in Python 3, but in the
Python 2 futures backport this information is discarded.
This functionality was previously available in a separate class
``TracebackFuture``, which is now a deprecated alias for this class.
In addition to ``exception`` and ``set_exception``, Tornado's
``Future`` implementation supports storing an ``exc_info`` triple
to support better tracebacks on Python 2. To set an ``exc_info``
triple, use `future_set_exc_info`, and to retrieve one, call
`result()` (which will raise it).
.. versionchanged:: 4.0
`tornado.concurrent.Future` is always a thread-unsafe ``Future``
@ -164,6 +176,17 @@ class Future(object):
where it results in undesired logging it may be necessary to
suppress the logging by ensuring that the exception is observed:
``f.add_done_callback(lambda f: f.exception())``.
.. versionchanged:: 5.0
This class was previously available under the name
``TracebackFuture``. This name, which was deprecated since
version 4.0, has been removed. When `asyncio` is available
``tornado.concurrent.Future`` is now an alias for
`asyncio.Future`. Like `asyncio.Future`, callbacks are now
always scheduled on the `.IOLoop` and are never run
synchronously.
"""
def __init__(self):
self._done = False
@ -265,7 +288,8 @@ class Future(object):
`add_done_callback` directly.
"""
if self._done:
fn(self)
from tornado.ioloop import IOLoop
IOLoop.current().add_callback(fn, self)
else:
self._callbacks.append(fn)
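The scheduling change in this hunk is observable: even for an already-finished Future, the done-callback is deferred to the IOLoop rather than run inline. A hedged sketch (Python 3 syntax):

from tornado import gen, ioloop
from tornado.concurrent import Future

@gen.coroutine
def demo():
    f = Future()
    f.set_result(42)
    f.add_done_callback(lambda f: print('ran on the IOLoop:', f.result()))
    # nothing has printed yet; give the loop one iteration to run it
    yield gen.moment

ioloop.IOLoop.current().run_sync(demo)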
@ -320,13 +344,12 @@ class Future(object):
def _set_done(self):
self._done = True
for cb in self._callbacks:
try:
cb(self)
except Exception:
app_log.exception('Exception in callback %r for %r',
cb, self)
self._callbacks = None
if self._callbacks:
from tornado.ioloop import IOLoop
loop = IOLoop.current()
for cb in self._callbacks:
loop.add_callback(cb, self)
self._callbacks = None
# On Python 3.3 or older, objects with a destructor part of a reference
# cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
@ -344,7 +367,8 @@ class Future(object):
self, ''.join(tb).rstrip())
TracebackFuture = Future
if asyncio is not None:
Future = asyncio.Future # noqa
if futures is None:
FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]]
@ -358,11 +382,11 @@ def is_future(x):
class DummyExecutor(object):
def submit(self, fn, *args, **kwargs):
future = TracebackFuture()
future = Future()
try:
future.set_result(fn(*args, **kwargs))
future_set_result_unless_cancelled(future, fn(*args, **kwargs))
except Exception:
future.set_exc_info(sys.exc_info())
future_set_exc_info(future, sys.exc_info())
return future
def shutdown(self, wait=True):
@ -378,29 +402,53 @@ def run_on_executor(*args, **kwargs):
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
The `.IOLoop` and executor to be used are determined by the ``io_loop``
and ``executor`` attributes of ``self``. To use different attributes,
pass keyword arguments to the decorator::
The executor to be used is determined by the ``executor``
attributes of ``self``. To use a different attribute name, pass a
keyword argument to the decorator::
@run_on_executor(executor='_thread_pool')
def foo(self):
pass
This decorator should not be confused with the similarly-named
`.IOLoop.run_in_executor`. In general, using ``run_in_executor``
when *calling* a blocking method is recommended instead of using
this decorator when *defining* a method. If compatibility with older
versions of Tornado is required, consider defining an executor
and using ``executor.submit()`` at the call site.
.. versionchanged:: 4.2
Added keyword arguments to use alternative attributes.
.. versionchanged:: 5.0
Always uses the current IOLoop instead of ``self.io_loop``.
.. versionchanged:: 5.1
Returns a `.Future` compatible with ``await`` instead of a
`concurrent.futures.Future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in
6.0. The decorator itself is discouraged in new code but will
not be removed in 6.0.
"""
def run_on_executor_decorator(fn):
executor = kwargs.get("executor", "executor")
io_loop = kwargs.get("io_loop", "io_loop")
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
callback = kwargs.pop("callback", None)
future = getattr(self, executor).submit(fn, self, *args, **kwargs)
async_future = Future()
conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs)
chain_future(conc_future, async_future)
if callback:
getattr(self, io_loop).add_future(
future, lambda future: callback(future.result()))
return future
warnings.warn("callback arguments are deprecated, use the returned Future instead",
DeprecationWarning)
from tornado.ioloop import IOLoop
IOLoop.current().add_future(
async_future, lambda future: callback(future.result()))
return async_future
return wrapper
if args and kwargs:
raise ValueError("cannot combine positional and keyword args")
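A hedged usage sketch of the decorator after this change; the thread pool size and class are arbitrary choices, and ``executor`` is the default attribute name:

from concurrent.futures import ThreadPoolExecutor
from tornado import gen, ioloop
from tornado.concurrent import run_on_executor

class Worker(object):
    def __init__(self):
        self.executor = ThreadPoolExecutor(max_workers=4)

    @run_on_executor
    def slow_add(self, a, b):
        return a + b  # runs on the pool; the caller gets an awaitable Future

@gen.coroutine
def main():
    result = yield Worker().slow_add(1, 2)
    print(result)

ioloop.IOLoop.current().run_sync(main)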
@ -418,6 +466,10 @@ def return_future(f):
"""Decorator to make a function that returns via callback return a
`Future`.
This decorator was provided to ease the transition from
callback-oriented code to coroutines. It is not recommended for
new code.
The wrapped function should take a ``callback`` keyword argument
and invoke it with one argument when it has finished. To signal failure,
the function can simply raise an exception (which will be
@ -425,7 +477,7 @@ def return_future(f):
From the caller's perspective, the callback argument is optional.
If one is given, it will be invoked when the function is complete
with `Future.result()` as an argument. If the function fails, the
with ``Future.result()`` as an argument. If the function fails, the
callback will not be run and an exception will be raised into the
surrounding `.StackContext`.
@ -452,18 +504,28 @@ def return_future(f):
Note that ``@return_future`` and ``@gen.engine`` can be applied to the
same function, provided ``@return_future`` appears first. However,
consider using ``@gen.coroutine`` instead of this combination.
.. versionchanged:: 5.1
Now raises a `.DeprecationWarning` if a callback argument is passed to
the decorated function and deprecation warnings are enabled.
.. deprecated:: 5.1
New code should use coroutines directly instead of wrapping
callback-based code with this decorator.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = TracebackFuture()
future = Future()
callback, args, kwargs = replacer.replace(
lambda value=_NO_RESULT: future.set_result(value),
lambda value=_NO_RESULT: future_set_result_unless_cancelled(future, value),
args, kwargs)
def handle_error(typ, value, tb):
future.set_exc_info((typ, value, tb))
future_set_exc_info(future, (typ, value, tb))
return True
exc_info = None
with ExceptionStackContext(handle_error):
@ -489,13 +551,16 @@ def return_future(f):
# immediate exception, and again when the future resolves and
# the callback triggers its exception by calling future.result()).
if callback is not None:
warnings.warn("callback arguments are deprecated, use the returned Future instead",
DeprecationWarning)
def run_callback(future):
result = future.result()
if result is _NO_RESULT:
callback()
else:
callback(future.result())
future.add_done_callback(wrap(run_callback))
future_add_done_callback(future, wrap(run_callback))
return future
return wrapper
@ -505,17 +570,72 @@ def chain_future(a, b):
The result (success or failure) of ``a`` will be copied to ``b``, unless
``b`` has already been completed or cancelled by the time ``a`` finishes.
.. versionchanged:: 5.0
Now accepts both Tornado/asyncio `Future` objects and
`concurrent.futures.Future`.
"""
def copy(future):
assert future is a
if b.done():
return
if (isinstance(a, TracebackFuture) and
isinstance(b, TracebackFuture) and
if (hasattr(a, 'exc_info') and
a.exc_info() is not None):
b.set_exc_info(a.exc_info())
future_set_exc_info(b, a.exc_info())
elif a.exception() is not None:
b.set_exception(a.exception())
else:
b.set_result(a.result())
a.add_done_callback(copy)
if isinstance(a, Future):
future_add_done_callback(a, copy)
else:
# concurrent.futures.Future
from tornado.ioloop import IOLoop
IOLoop.current().add_future(a, copy)
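A minimal sketch of the newly-supported direction, copying a ``concurrent.futures.Future`` into an awaitable one:

from concurrent.futures import ThreadPoolExecutor
from tornado import gen, ioloop
from tornado.concurrent import Future, chain_future

@gen.coroutine
def main():
    pool = ThreadPoolExecutor(max_workers=1)
    conc = pool.submit(lambda: 'computed in a thread')
    fut = Future()
    chain_future(conc, fut)  # accepts either Future flavor since 5.0
    result = yield fut
    print(result)

ioloop.IOLoop.current().run_sync(main)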
def future_set_result_unless_cancelled(future, value):
"""Set the given ``value`` as the `Future`'s result, if not cancelled.
Avoids asyncio.InvalidStateError when calling set_result() on
a cancelled `asyncio.Future`.
.. versionadded:: 5.0
"""
if not future.cancelled():
future.set_result(value)
def future_set_exc_info(future, exc_info):
"""Set the given ``exc_info`` as the `Future`'s exception.
Understands both `asyncio.Future` and Tornado's extensions to
enable better tracebacks on Python 2.
.. versionadded:: 5.0
"""
if hasattr(future, 'set_exc_info'):
# Tornado's Future
future.set_exc_info(exc_info)
else:
# asyncio.Future
future.set_exception(exc_info[1])
def future_add_done_callback(future, callback):
"""Arrange to call ``callback`` when ``future`` is complete.
``callback`` is invoked with one argument, the ``future``.
If ``future`` is already done, ``callback`` is invoked immediately.
This may differ from the behavior of ``Future.add_done_callback``,
which makes no such guarantee.
.. versionadded:: 5.0
"""
if future.done():
callback(future)
else:
future.add_done_callback(callback)
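Taken together, these helpers let library code resolve a Future without caring whether it is an ``asyncio.Future`` or Tornado's Python 2 implementation. A compact sketch mirroring the ``DummyExecutor`` pattern above:

import sys
from tornado.concurrent import (future_set_exc_info,
                                future_set_result_unless_cancelled)

def resolve(future, fn):
    # run fn() synchronously and copy its outcome onto `future`
    try:
        future_set_result_unless_cancelled(future, fn())
    except Exception:
        future_set_exc_info(future, sys.exc_info())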


@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -37,8 +36,8 @@ curl_log = logging.getLogger('tornado.curl_httpclient')
class CurlAsyncHTTPClient(AsyncHTTPClient):
def initialize(self, io_loop, max_clients=10, defaults=None):
super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
def initialize(self, max_clients=10, defaults=None):
super(CurlAsyncHTTPClient, self).initialize(defaults=defaults)
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
@ -53,7 +52,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
# SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000, io_loop=io_loop)
self._handle_force_timeout, 1000)
self._force_timeout_callback.start()
# Work around a bug in libcurl 7.29.0: Some fields in the curl
@ -74,6 +73,12 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
self._multi.close()
super(CurlAsyncHTTPClient, self).close()
# Set the below properties to None to reduce the reference count of the
# current instance: they hold bound methods of this instance, which
# would otherwise cause a circular reference.
self._force_timeout_callback = None
self._multi = None
def fetch_impl(self, request, callback):
self._requests.append((request, callback))
self._process_queue()
@ -255,6 +260,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
queue=info["curl_start_time"] - info["request"].start_time,
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
appconnect=curl.getinfo(pycurl.APPCONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME),
@ -494,8 +500,10 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
def _curl_debug(self, debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
debug_msg = native_str(debug_msg)
curl_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
debug_msg = native_str(debug_msg)
for line in debug_msg.splitlines():
curl_log.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
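The ``initialize()`` signature change only matters for subclasses; selecting the curl client from application code is unchanged:

from tornado.httpclient import AsyncHTTPClient

AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')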


@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -274,7 +273,9 @@ def recursive_unicode(obj):
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)"""))
_URL_RE = re.compile(to_unicode(
r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)""" # noqa: E501
))
def linkify(text, shorten=False, extra_params="",
@ -289,24 +290,24 @@ def linkify(text, shorten=False, extra_params="",
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified.
this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``.
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``.
"""
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
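A short usage sketch of the parameters documented above:

from tornado.escape import linkify

html = linkify('docs at www.tornadoweb.org',
               require_protocol=False,
               extra_params='rel="nofollow"')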


@ -1,6 +1,20 @@
"""``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
"""``tornado.gen`` implements generator-based coroutines.
.. note::
The "decorator and generator" approach in this module is a
precursor to native coroutines (using ``async def`` and ``await``)
which were introduced in Python 3.5. Applications that do not
require compatibility with older versions of Python should use
native coroutines instead. Some parts of this module are still
useful with native coroutines, notably `multi`, `sleep`,
`WaitIterator`, and `with_timeout`. Some of these functions have
counterparts in the `asyncio` module which may be used as well,
although the two may not necessarily be 100% compatible.
Coroutines provide an easier way to work in an asynchronous
environment than chaining callbacks. Code using coroutines is
technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
@ -37,7 +51,7 @@ could be written with ``gen`` as:
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
yielding this object returns its ``Future.result``.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
@ -81,15 +95,15 @@ import functools
import itertools
import os
import sys
import textwrap
import types
import weakref
import warnings
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.concurrent import (Future, is_future, chain_future, future_set_exc_info,
future_add_done_callback, future_set_result_unless_cancelled)
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import PY3, raise_exc_info
from tornado.util import PY3, raise_exc_info, TimeoutError
try:
try:
@ -154,10 +168,6 @@ class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def _value_from_stopiteration(e):
try:
# StopIteration has a value attribute beginning in py33.
@ -173,6 +183,21 @@ def _value_from_stopiteration(e):
return None
def _create_future():
future = Future()
# Fixup asyncio debug info by removing extraneous stack entries
source_traceback = getattr(future, "_source_traceback", ())
while source_traceback:
# Each traceback entry is equivalent to a
# (filename, self.lineno, self.name, self.line) tuple
filename = source_traceback[-1][0]
if filename == __file__:
del source_traceback[-1]
else:
break
return future
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
@ -189,7 +214,14 @@ def engine(func):
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
.. deprecated:: 5.1
This decorator will be removed in 6.0. Use `coroutine` or
``async def`` instead.
"""
warnings.warn("gen.engine is deprecated, use gen.coroutine or async def instead",
DeprecationWarning)
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
@ -204,11 +236,11 @@ def engine(func):
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
future_add_done_callback(future, stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
def coroutine(func):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
@ -229,9 +261,6 @@ def coroutine(func, replace_callback=True):
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
@ -242,30 +271,14 @@ def coroutine(func, replace_callback=True):
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
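A hedged sketch of the two styles the module docstring contrasts, decorated coroutines versus native ones; ``fetch_something`` is a hypothetical awaitable helper, not from the diff:

import tornado.gen
import tornado.web

class GenHandler(tornado.web.RequestHandler):
    @tornado.gen.coroutine
    def get(self):
        response = yield fetch_something()   # hypothetical async helper
        self.write(response)

class NativeHandler(tornado.web.RequestHandler):
    async def get(self):                     # preferred on Python 3.5+
        response = await fetch_something()
        self.write(response)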
# Ties lifetime of runners to their result futures. Github Issue #1769
# Generators, like any object in Python, must be strong referenced
# in order to not be cleaned up by the garbage collector. When using
# coroutines, the Runner object is what strong-refs the inner
# generator. However, the only item that strong-reffed the Runner
# was the last Future that the inner generator yielded (via the
# Future's internal done_callback list). Usually this is enough, but
# it is also possible for this Future to not have any strong references
# other than other objects referenced by the Runner object (usually
# when using other callback patterns and/or weakrefs). In this
# situation, if a garbage collection ran, a cycle would be detected and
# Runner objects could be destroyed along with their inner generators
# and everything in their local scope.
# This map provides strong references to Runner objects as long as
# their result future objects also have strong references (typically
# from the parent coroutine's Runner). This keeps the coroutine's
# Runner alive.
_futures_to_runners = weakref.WeakKeyDictionary()
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
@ -281,9 +294,11 @@ def _make_coroutine_wrapper(func, replace_callback):
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
future = TracebackFuture()
future = _create_future()
if replace_callback and 'callback' in kwargs:
warnings.warn("callback arguments are deprecated, use the returned Future instead",
DeprecationWarning, stacklevel=2)
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
@ -293,8 +308,12 @@ def _make_coroutine_wrapper(func, replace_callback):
except (Return, StopIteration) as e:
result = _value_from_stopiteration(e)
except Exception:
future.set_exc_info(sys.exc_info())
return future
future_set_exc_info(future, sys.exc_info())
try:
return future
finally:
# Avoid circular references
future = None
else:
if isinstance(result, GeneratorType):
# Inline the first iteration of Runner.run. This lets us
@ -306,17 +325,26 @@ def _make_coroutine_wrapper(func, replace_callback):
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded = _create_future()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(_value_from_stopiteration(e))
future_set_result_unless_cancelled(future, _value_from_stopiteration(e))
except Exception:
future.set_exc_info(sys.exc_info())
future_set_exc_info(future, sys.exc_info())
else:
_futures_to_runners[future] = Runner(result, future, yielded)
# Provide strong references to Runner objects as long
# as their result future objects also have strong
# references (typically from the parent coroutine's
# Runner). This keeps the coroutine's Runner alive.
# We do this by exploiting the public API
# add_done_callback() instead of putting a private
# attribute on the Future.
# (Github issues #1769, #2229).
runner = Runner(result, future, yielded)
future.add_done_callback(lambda _: runner)
yielded = None
try:
return future
@ -330,7 +358,7 @@ def _make_coroutine_wrapper(func, replace_callback):
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
future_set_result_unless_cancelled(future, result)
return future
wrapper.__wrapped__ = wrapped
@ -444,7 +472,7 @@ class WaitIterator(object):
self._running_future = None
for future in futures:
future.add_done_callback(self._done_callback)
future_add_done_callback(future, self._done_callback)
def done(self):
"""Returns True if this iterator has no more results."""
@ -460,7 +488,7 @@ class WaitIterator(object):
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
self._running_future = Future()
if self._finished:
self._return_result(self._finished.popleft())
@ -482,9 +510,8 @@ class WaitIterator(object):
self.current_future = done
self.current_index = self._unfinished.pop(done)
@coroutine
def __aiter__(self):
raise Return(self)
return self
def __anext__(self):
if self.done():
@ -497,8 +524,13 @@ class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
Use `Futures <.Future>` instead. This class and all its subclasses
will be removed in 6.0.
"""
def __init__(self):
warnings.warn("YieldPoint is deprecated, use Futures instead",
DeprecationWarning)
def start(self, runner):
"""Called by the runner after the generator has yielded.
@ -535,9 +567,11 @@ class Callback(YieldPoint):
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
Use `Futures <.Future>` instead. This class will be removed in 6.0.
"""
def __init__(self, key):
warnings.warn("gen.Callback is deprecated, use Futures instead",
DeprecationWarning)
self.key = key
def start(self, runner):
@ -555,9 +589,11 @@ class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
Use `Futures <.Future>` instead. This class will be removed in 6.0.
"""
def __init__(self, key):
warnings.warn("gen.Wait is deprecated, use Futures instead",
DeprecationWarning)
self.key = key
def start(self, runner):
@ -579,9 +615,11 @@ class WaitAll(YieldPoint):
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
Use `Futures <.Future>` instead. This class will be removed in 6.0.
"""
def __init__(self, keys):
warnings.warn("gen.WaitAll is deprecated, use gen.multi instead",
DeprecationWarning)
self.keys = keys
def start(self, runner):
@ -605,33 +643,43 @@ def Task(func, *args, **kwargs):
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
.. deprecated:: 5.1
This function is deprecated and will be removed in 6.0.
"""
future = Future()
warnings.warn("gen.Task is deprecated, use Futures instead",
DeprecationWarning)
future = _create_future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
future_set_exc_info(future, (typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
future_set_result_unless_cancelled(future, result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
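A migration sketch for the ``gen.Task`` deprecation; ``do_work`` and ``do_work_async`` are hypothetical stand-ins for a callback-style API and its Future-returning replacement:

from tornado import gen

def do_work(arg, callback):      # legacy callback-style function
    callback(arg * 2)

@gen.coroutine
def do_work_async(arg):          # Future-returning replacement
    raise gen.Return(arg * 2)

@gen.coroutine
def old_style():
    result = yield gen.Task(do_work, 21)   # emits DeprecationWarning in 5.1

@gen.coroutine
def new_style():
    result = yield do_work_async(21)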
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
def __init__(self, future):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. deprecated:: 5.1
This class will be removed in 6.0.
"""
warnings.warn("YieldFuture is deprecated, use Futures instead",
DeprecationWarning)
self.future = future
self.io_loop = io_loop or IOLoop.current()
self.io_loop = IOLoop.current()
def start(self, runner):
if not self.future.done():
@ -704,6 +752,10 @@ def multi(children, quiet_exceptions=()):
This function is available under the names ``multi()`` and ``Multi()``
for historical reasons.
Cancelling a `.Future` returned by ``multi()`` does not cancel its
children. `asyncio.gather` is similar to ``multi()``, but it does
cancel its children.
.. versionchanged:: 4.2
If multiple yieldables fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
@ -741,9 +793,11 @@ class MultiYieldPoint(YieldPoint):
remains as an alias for the equivalent `multi` function.
.. deprecated:: 4.3
Use `multi` instead.
Use `multi` instead. This class will be removed in 6.0.
"""
def __init__(self, children, quiet_exceptions=()):
warnings.warn("MultiYieldPoint is deprecated, use Futures instead",
DeprecationWarning)
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
@ -812,12 +866,13 @@ def multi_future(children, quiet_exceptions=()):
else:
keys = None
children = list(map(convert_yielded, children))
assert all(is_future(i) for i in children)
assert all(is_future(i) or isinstance(i, _NullFuture) for i in children)
unfinished_children = set(children)
future = Future()
future = _create_future()
if not children:
future.set_result({} if keys is not None else [])
future_set_result_unless_cancelled(future,
{} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
@ -832,18 +887,19 @@ def multi_future(children, quiet_exceptions=()):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future.set_exc_info(sys.exc_info())
future_set_exc_info(future, sys.exc_info())
if not future.done():
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
future_set_result_unless_cancelled(future,
dict(zip(keys, result_list)))
else:
future.set_result(result_list)
future_set_result_unless_cancelled(future, result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
f.add_done_callback(callback)
future_add_done_callback(f, callback)
return future
@ -863,18 +919,18 @@ def maybe_future(x):
if is_future(x):
return x
else:
fut = Future()
fut = _create_future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
def with_timeout(timeout, future, quiet_exceptions=()):
"""Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
Raises `tornado.util.TimeoutError` if the input future does not
complete before ``timeout``, which may be specified in any form
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
an absolute time relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
@ -882,6 +938,10 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
Does not support `YieldPoint` subclasses.
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0
.. versionchanged:: 4.1
@ -890,6 +950,7 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
.. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`.
"""
# TODO: allow YieldPoints in addition to other yieldables?
# Tricky to do with stack_context semantics.
@ -900,10 +961,9 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
future = convert_yielded(future)
result = Future()
result = _create_future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
io_loop = IOLoop.current()
def error_callback(future):
try:
@ -914,17 +974,18 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
if not result.done():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
future_add_done_callback(future, error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here.
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
future_add_done_callback(
future, lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
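A minimal ``with_timeout`` sketch; the one-second budget is an arbitrary choice:

import datetime
from tornado import gen, ioloop

@gen.coroutine
def main():
    try:
        yield gen.with_timeout(datetime.timedelta(seconds=1), gen.sleep(5))
    except gen.TimeoutError:  # re-exported from tornado.util since 5.0
        print('timed out; the wrapped future is not cancelled')

ioloop.IOLoop.current().run_sync(main)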
@ -947,15 +1008,31 @@ def sleep(duration):
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
f = _create_future()
IOLoop.current().call_later(duration,
lambda: future_set_result_unless_cancelled(f, None))
return f
_null_future = Future()
_null_future.set_result(None)
class _NullFuture(object):
"""_NullFuture resembles a Future that finished with a result of None.
moment = Future()
It's not actually a `Future` to avoid depending on a particular event loop.
Handled as a special case in the coroutine runner.
"""
def result(self):
return None
def done(self):
return True
# _null_future is used as a dummy value in the coroutine runner. It differs
# from moment in that moment always adds a delay of one IOLoop iteration
# while _null_future is processed as soon as possible.
_null_future = _NullFuture()
moment = _NullFuture()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
@ -968,9 +1045,9 @@ Usage: ``yield gen.moment``
.. versionadded:: 4.0
.. deprecated:: 4.5
``yield None`` is now equivalent to ``yield gen.moment``.
``yield None`` (or ``yield`` with no argument) is now equivalent to
``yield gen.moment``.
"""
moment.set_result(None)
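A small sketch of what yielding ``moment`` buys, interleaving two coroutines one IOLoop iteration at a time:

from tornado import gen, ioloop

@gen.coroutine
def chatty(name, count):
    for i in range(count):
        print(name, i)
        yield gen.moment  # let other callbacks run; a bare `yield` is equivalent

ioloop.IOLoop.current().run_sync(
    lambda: gen.multi([chatty('a', 2), chatty('b', 2)]))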
class Runner(object):
@ -979,7 +1056,7 @@ class Runner(object):
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
`.Future`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
@ -1023,9 +1100,10 @@ class Runner(object):
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
future_set_result_unless_cancelled(self.future,
self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
future_set_exc_info(self.future, sys.exc_info())
self.yield_point = None
self.run()
@ -1084,14 +1162,15 @@ class Runner(object):
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(_value_from_stopiteration(e))
future_set_result_unless_cancelled(self.result_future,
_value_from_stopiteration(e))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
future_set_exc_info(self.result_future, sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
@ -1110,19 +1189,18 @@ class Runner(object):
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
self.future = Future()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
future_set_result_unless_cancelled(self.future, yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
self.future = Future()
future_set_exc_info(self.future, sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
@ -1142,13 +1220,16 @@ class Runner(object):
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
self.future = Future()
future_set_exc_info(self.future, sys.exc_info())
if not self.future.done() or self.future is moment:
if self.future is moment:
self.io_loop.add_callback(self.run)
return False
elif not self.future.done():
def inner(f):
# Break a reference cycle to speed GC.
f = None # noqa
f = None # noqa
self.run()
self.io_loop.add_future(
self.future, inner)
@ -1161,8 +1242,8 @@ class Runner(object):
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.future = Future()
future_set_exc_info(self.future, (typ, value, tb))
self.run()
return True
else:
@ -1194,20 +1275,10 @@ def _argument_adapter(callback):
return wrapper
# Convert Awaitables into Futures. It is unfortunately possible
# to have infinite recursion here if those Awaitables assume that
# we're using a different coroutine runner and yield objects
# we don't understand. If that happens, the solution is to
# register that runner's yieldable objects with convert_yielded.
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
@coroutine
def _wrap_awaitable(x):
if hasattr(x, '__await__'):
x = x.__await__()
return (yield from x)
"""))
else:
# Convert Awaitables into Futures.
try:
import asyncio
except ImportError:
# Py2-compatible version for use with Cython.
# Copied from PEP 380.
@coroutine
@ -1254,6 +1325,13 @@ else:
_r = _value_from_stopiteration(_e)
break
raise Return(_r)
else:
try:
_wrap_awaitable = asyncio.ensure_future
except AttributeError:
# asyncio.ensure_future was introduced in Python 3.4.4, but
# Debian jessie still ships with 3.4.2 so try the old name.
_wrap_awaitable = getattr(asyncio, 'async')
def convert_yielded(yielded):
@ -1271,8 +1349,10 @@ def convert_yielded(yielded):
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled earlier.
if yielded is None:
if yielded is None or yielded is moment:
return moment
elif yielded is _null_future:
return _null_future
elif isinstance(yielded, (list, dict)):
return multi(yielded)
elif is_future(yielded):
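A hedged sketch of extending the conversion via singledispatch (available on Python 3.4+ or via the backport); ``SleepToken`` is a hypothetical application type:

from tornado import gen

class SleepToken(object):
    def __init__(self, seconds):
        self.seconds = seconds

@gen.convert_yielded.register(SleepToken)
def _convert_sleep_token(token):
    return gen.sleep(token.seconds)

@gen.coroutine
def demo():
    yield SleepToken(0.1)  # now usable directly in coroutines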
@ -1285,19 +1365,3 @@ def convert_yielded(yielded):
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
try:
# If we can import t.p.asyncio, do it for its side effect
# (registering asyncio.Future with convert_yielded).
# It's ugly to do this here, but it prevents a cryptic
# infinite recursion in _wrap_awaitable.
# Note that even with this, asyncio integration is unlikely
# to work unless the application also configures AsyncIOLoop,
# but at least the error messages in that case are more
# comprehensible than a stack overflow.
import tornado.platform.asyncio
except ImportError:
pass
else:
# Reference the imported module to make pyflakes happy.
tornado
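Because ``convert_yielded`` is wrapped in ``singledispatch`` (when available), applications can teach the coroutine runner about third-party yieldable types; a hedged sketch with a hypothetical type::

    from tornado.concurrent import Future
    from tornado.gen import convert_yielded

    class MyYieldable(object):                  # hypothetical custom type
        def __init__(self, value):
            self.value = value

    @convert_yielded.register(MyYieldable)
    def _convert_my_yieldable(yielded):
        f = Future()
        f.set_result(yielded.value)             # resolve immediately in this sketch
        return f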

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2014 Facebook
#
@ -23,7 +22,8 @@ from __future__ import absolute_import, division, print_function
import re
from tornado.concurrent import Future
from tornado.concurrent import (Future, future_add_done_callback,
future_set_result_unless_cancelled)
from tornado.escape import native_str, utf8
from tornado import gen
from tornado import httputil
@ -164,7 +164,6 @@ class HTTP1Connection(httputil.HTTPConnection):
header_data = yield gen.with_timeout(
self.stream.io_loop.time() + self.params.header_timeout,
header_future,
io_loop=self.stream.io_loop,
quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError:
self.close()
@ -224,7 +223,7 @@ class HTTP1Connection(httputil.HTTPConnection):
try:
yield gen.with_timeout(
self.stream.io_loop.time() + self._body_timeout,
body_future, self.stream.io_loop,
body_future,
quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError:
gen_log.info("Timeout reading body from %s",
@ -251,6 +250,8 @@ class HTTP1Connection(httputil.HTTPConnection):
except httputil.HTTPInputError as e:
gen_log.info("Malformed HTTP message from %s: %s",
self.context, e)
if not self.is_client:
yield self.stream.write(b'HTTP/1.1 400 Bad Request\r\n\r\n')
self.close()
raise gen.Return(False)
finally:
@ -290,7 +291,7 @@ class HTTP1Connection(httputil.HTTPConnection):
self._close_callback = None
callback()
if not self._finish_future.done():
self._finish_future.set_result(None)
future_set_result_unless_cancelled(self._finish_future, None)
self._clear_callbacks()
def close(self):
@ -298,7 +299,7 @@ class HTTP1Connection(httputil.HTTPConnection):
self.stream.close()
self._clear_callbacks()
if not self._finish_future.done():
self._finish_future.set_result(None)
future_set_result_unless_cancelled(self._finish_future, None)
def detach(self):
"""Take control of the underlying stream.
@ -312,7 +313,7 @@ class HTTP1Connection(httputil.HTTPConnection):
stream = self.stream
self.stream = None
if not self._finish_future.done():
self._finish_future.set_result(None)
future_set_result_unless_cancelled(self._finish_future, None)
return stream
def set_body_timeout(self, timeout):
@ -349,19 +350,22 @@ class HTTP1Connection(httputil.HTTPConnection):
# self._request_start_line.version or
# start_line.version?
self._request_start_line.version == 'HTTP/1.1' and
# 304 responses have no body (not even a zero-length body), and so
# should not have either Content-Length or Transfer-Encoding.
# headers.
# 1xx, 204 and 304 responses have no body (not even a zero-length
# body), and so should not have either Content-Length or
# Transfer-Encoding headers.
start_line.code not in (204, 304) and
(start_line.code < 100 or start_line.code >= 200) and
# No need to chunk the output if a Content-Length is specified.
'Content-Length' not in headers and
# Applications are discouraged from touching Transfer-Encoding,
# but if they do, leave it alone.
'Transfer-Encoding' not in headers)
# If connection to a 1.1 client will be closed, inform client
if (self._request_start_line.version == 'HTTP/1.1' and self._disconnect_on_finish):
headers['Connection'] = 'close'
# If a 1.0 client asked for keep-alive, add the header.
if (self._request_start_line.version == 'HTTP/1.0' and
(self._request_headers.get('Connection', '').lower() ==
'keep-alive')):
self._request_headers.get('Connection', '').lower() == 'keep-alive'):
headers['Connection'] = 'Keep-Alive'
if self._chunking_output:
headers['Transfer-Encoding'] = 'chunked'
@ -419,7 +423,7 @@ class HTTP1Connection(httputil.HTTPConnection):
def write(self, chunk, callback=None):
"""Implements `.HTTPConnection.write`.
For backwards compatibility is is allowed but deprecated to
For backwards compatibility it is allowed but deprecated to
skip `write_headers` and instead call `write()` with a
pre-encoded header block.
"""
@ -464,7 +468,7 @@ class HTTP1Connection(httputil.HTTPConnection):
if self._pending_write is None:
self._finish_request(None)
else:
self._pending_write.add_done_callback(self._finish_request)
future_add_done_callback(self._pending_write, self._finish_request)
def _on_write_complete(self, future):
exc = future.exception()
@ -477,7 +481,7 @@ class HTTP1Connection(httputil.HTTPConnection):
if self._write_future is not None:
future = self._write_future
self._write_future = None
future.set_result(None)
future_set_result_unless_cancelled(future, None)
def _can_keep_alive(self, start_line, headers):
if self.params.no_keep_alive:
@ -504,7 +508,7 @@ class HTTP1Connection(httputil.HTTPConnection):
# default state for the next request.
self.stream.set_nodelay(False)
if not self._finish_future.done():
self._finish_future.set_result(None)
future_set_result_unless_cancelled(self._finish_future, None)
def _parse_headers(self, data):
# The lstrip removes newlines that some implementations sometimes
@ -515,12 +519,7 @@ class HTTP1Connection(httputil.HTTPConnection):
# RFC 7230 section allows for both CRLF and bare LF.
eol = data.find("\n")
start_line = data[:eol].rstrip("\r")
try:
headers = httputil.HTTPHeaders.parse(data[eol:])
except ValueError:
# probably from split() if there was no ':' in the line
raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
data[eol:100])
headers = httputil.HTTPHeaders.parse(data[eol:])
return start_line, headers
def _read_body(self, code, headers, delegate):
@ -592,6 +591,9 @@ class HTTP1Connection(httputil.HTTPConnection):
chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
chunk_len = int(chunk_len.strip(), 16)
if chunk_len == 0:
crlf = yield self.stream.read_bytes(2)
if crlf != b'\r\n':
raise httputil.HTTPInputError("improperly terminated chunked request")
return
total_size += chunk_len
if total_size > self._max_body_size:
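For reference, the added two-byte read enforces the final CRLF of the chunked wire format; a correctly terminated body looks like this::

    body = (b"4\r\nWiki\r\n"    # chunk-size (hex), then data + CRLF
            b"0\r\n"            # zero-length chunk marks the end
            b"\r\n")            # trailing CRLF -- the two bytes checked above
    assert body.endswith(b"0\r\n\r\n")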

View file

@ -44,9 +44,9 @@ import functools
import time
import weakref
from tornado.concurrent import TracebackFuture
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str
from tornado import httputil, stack_context
from tornado import gen, httputil, stack_context
from tornado.ioloop import IOLoop
from tornado.util import Configurable
@ -54,8 +54,10 @@ from tornado.util import Configurable
class HTTPClient(object):
"""A blocking HTTP client.
This interface is provided for convenience and testing; most applications
that are running an IOLoop will want to use `AsyncHTTPClient` instead.
This interface is provided to make it easier to share code between
synchronous and asynchronous applications. Applications that are
running an `.IOLoop` must use `AsyncHTTPClient` instead.
Typical usage looks like this::
http_client = httpclient.HTTPClient()
@ -70,12 +72,26 @@ class HTTPClient(object):
# Other errors are possible, such as IOError.
print("Error: " + str(e))
http_client.close()
.. versionchanged:: 5.0
Due to limitations in `asyncio`, it is no longer possible to
use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
Use `AsyncHTTPClient` instead.
"""
def __init__(self, async_client_class=None, **kwargs):
# Initialize self._closed at the beginning of the constructor
# so that an exception raised here doesn't lead to confusing
# failures in __del__.
self._closed = True
self._io_loop = IOLoop(make_current=False)
if async_client_class is None:
async_client_class = AsyncHTTPClient
self._async_client = async_client_class(self._io_loop, **kwargs)
# Create the client while our IOLoop is "current", without
# clobbering the thread's real current IOLoop (if any).
self._async_client = self._io_loop.run_sync(
gen.coroutine(lambda: async_client_class(**kwargs)))
self._closed = False
def __del__(self):
@ -120,12 +136,12 @@ class AsyncHTTPClient(Configurable):
The constructor for this class is magic in several respects: It
actually creates an instance of an implementation-specific
subclass, and instances are reused as a kind of pseudo-singleton
(one per `.IOLoop`). The keyword argument ``force_instance=True``
can be used to suppress this singleton behavior. Unless
``force_instance=True`` is used, no arguments other than
``io_loop`` should be passed to the `AsyncHTTPClient` constructor.
The implementation subclass as well as arguments to its
constructor can be set with the static method `configure()`
(one per `.IOLoop`). The keyword argument ``force_instance=True``
can be used to suppress this singleton behavior. Unless
``force_instance=True`` is used, no arguments should be passed to
the `AsyncHTTPClient` constructor. The implementation subclass as
well as arguments to its constructor can be set with the static
method `configure()`
All `AsyncHTTPClient` implementations support a ``defaults``
keyword argument, which can be used to set default values for
@ -137,8 +153,9 @@ class AsyncHTTPClient(Configurable):
client = AsyncHTTPClient(force_instance=True,
defaults=dict(user_agent="MyUserAgent"))
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
@classmethod
def configurable_base(cls):
@ -156,16 +173,15 @@ class AsyncHTTPClient(Configurable):
setattr(cls, attr_name, weakref.WeakKeyDictionary())
return getattr(cls, attr_name)
def __new__(cls, io_loop=None, force_instance=False, **kwargs):
io_loop = io_loop or IOLoop.current()
def __new__(cls, force_instance=False, **kwargs):
io_loop = IOLoop.current()
if force_instance:
instance_cache = None
else:
instance_cache = cls._async_clients()
if instance_cache is not None and io_loop in instance_cache:
return instance_cache[io_loop]
instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
**kwargs)
instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs)
# Make sure the instance knows which cache to remove itself from.
# It can't simply call _async_clients() because we may be in
# __new__(AsyncHTTPClient) but instance.__class__ may be
@ -175,8 +191,8 @@ class AsyncHTTPClient(Configurable):
instance_cache[instance.io_loop] = instance
return instance
def initialize(self, io_loop, defaults=None):
self.io_loop = io_loop
def initialize(self, defaults=None):
self.io_loop = IOLoop.current()
self.defaults = dict(HTTPRequest._DEFAULTS)
if defaults is not None:
self.defaults.update(defaults)
@ -235,7 +251,7 @@ class AsyncHTTPClient(Configurable):
# where normal dicts get converted to HTTPHeaders objects.
request.headers = httputil.HTTPHeaders(request.headers)
request = _RequestProxy(request, self.defaults)
future = TracebackFuture()
future = Future()
if callback is not None:
callback = stack_context.wrap(callback)
@ -256,7 +272,7 @@ class AsyncHTTPClient(Configurable):
if raise_error and response.error:
future.set_exception(response.error)
else:
future.set_result(response)
future_set_result_unless_cancelled(future, response)
self.fetch_impl(request, handle_response)
return future
@ -318,8 +334,8 @@ class HTTPRequest(object):
ssl_options=None):
r"""All parameters except ``url`` are optional.
:arg string url: URL to fetch
:arg string method: HTTP method, e.g. "GET" or "POST"
:arg str url: URL to fetch
:arg str method: HTTP method, e.g. "GET" or "POST"
:arg headers: Additional HTTP headers to pass on the request
:type headers: `~tornado.httputil.HTTPHeaders` or `dict`
:arg body: HTTP request body as a string (byte or unicode; if unicode
@ -335,9 +351,9 @@ class HTTPRequest(object):
to pass a ``Content-Length`` in the headers as otherwise chunked
encoding will be used, and many servers do not support chunked
encoding on requests. New in Tornado 4.0
:arg string auth_username: Username for HTTP authentication
:arg string auth_password: Password for HTTP authentication
:arg string auth_mode: Authentication mode; default is "basic".
:arg str auth_username: Username for HTTP authentication
:arg str auth_password: Password for HTTP authentication
:arg str auth_mode: Authentication mode; default is "basic".
Allowed values are implementation-defined; ``curl_httpclient``
supports "basic" and "digest"; ``simple_httpclient`` only supports
"basic"
@ -350,19 +366,19 @@ class HTTPRequest(object):
:arg bool follow_redirects: Should redirects be followed automatically
or return the 3xx response? Default True.
:arg int max_redirects: Limit for ``follow_redirects``, default 5.
:arg string user_agent: String to send as ``User-Agent`` header
:arg str user_agent: String to send as ``User-Agent`` header
:arg bool decompress_response: Request a compressed response from
the server and decompress it after downloading. Default is True.
New in Tornado 4.0.
:arg bool use_gzip: Deprecated alias for ``decompress_response``
since Tornado 4.0.
:arg string network_interface: Network interface to use for request.
:arg str network_interface: Network interface to use for request.
``curl_httpclient`` only; see note below.
:arg callable streaming_callback: If set, ``streaming_callback`` will
:arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
be run with each chunk of data as it is received, and
``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
the final response.
:arg callable header_callback: If set, ``header_callback`` will
:arg collections.abc.Callable header_callback: If set, ``header_callback`` will
be run with each header line as it is received (including the
first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
containing only ``\r\n``. All lines include the trailing newline
@ -370,28 +386,28 @@ class HTTPRequest(object):
response. This is most useful in conjunction with
``streaming_callback``, because it's the only way to get access to
header data while the request is in progress.
:arg callable prepare_curl_callback: If set, will be called with
:arg collections.abc.Callable prepare_curl_callback: If set, will be called with
a ``pycurl.Curl`` object to allow the application to make additional
``setopt`` calls.
:arg string proxy_host: HTTP proxy hostname. To use proxies,
:arg str proxy_host: HTTP proxy hostname. To use proxies,
``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
``proxy_pass`` and ``proxy_auth_mode`` are optional. Proxies are
currently only supported with ``curl_httpclient``.
:arg int proxy_port: HTTP proxy port
:arg string proxy_username: HTTP proxy username
:arg string proxy_password: HTTP proxy password
:arg string proxy_auth_mode: HTTP proxy Authentication mode;
:arg str proxy_username: HTTP proxy username
:arg str proxy_password: HTTP proxy password
:arg str proxy_auth_mode: HTTP proxy Authentication mode;
default is "basic"; supports "basic" and "digest".
:arg bool allow_nonstandard_methods: Allow unknown values for ``method``
argument? Default is False.
:arg bool validate_cert: For HTTPS requests, validate the server's
certificate? Default is True.
:arg string ca_certs: filename of CA certificates in PEM format,
:arg str ca_certs: filename of CA certificates in PEM format,
or None to use defaults. See note below when used with
``curl_httpclient``.
:arg string client_key: Filename for client SSL key, if any. See
:arg str client_key: Filename for client SSL key, if any. See
note below when used with ``curl_httpclient``.
:arg string client_cert: Filename for client SSL certificate, if any.
:arg str client_cert: Filename for client SSL certificate, if any.
See note below when used with ``curl_httpclient``.
:arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
``simple_httpclient`` (unsupported by ``curl_httpclient``).
@ -654,6 +670,8 @@ def main():
define("print_body", type=bool, default=True)
define("follow_redirects", type=bool, default=True)
define("validate_cert", type=bool, default=True)
define("proxy_host", type=str)
define("proxy_port", type=int)
args = parse_command_line()
client = HTTPClient()
for arg in args:
@ -661,6 +679,8 @@ def main():
response = client.fetch(arg,
follow_redirects=options.follow_redirects,
validate_cert=options.validate_cert,
proxy_host=options.proxy_host,
proxy_port=options.proxy_port,
)
except HTTPError as e:
if e.response is not None:

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -77,7 +76,7 @@ class HTTPServer(TCPServer, Configurable,
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
os.path.join(data_dir, "mydomain.key"))
HTTPServer(applicaton, ssl_options=ssl_ctx)
HTTPServer(application, ssl_options=ssl_ctx)
`HTTPServer` initialization follows one of three patterns (the
initialization methods are defined on `tornado.tcpserver.TCPServer`):
@ -134,6 +133,9 @@ class HTTPServer(TCPServer, Configurable,
.. versionchanged:: 4.5
Added the ``trusted_downstream`` argument.
.. versionchanged:: 5.0
The ``io_loop`` argument has been removed.
"""
def __init__(self, *args, **kwargs):
# Ignore args to __init__; real initialization belongs in
@ -143,7 +145,7 @@ class HTTPServer(TCPServer, Configurable,
# completely)
pass
def initialize(self, request_callback, no_keep_alive=False, io_loop=None,
def initialize(self, request_callback, no_keep_alive=False,
xheaders=False, ssl_options=None, protocol=None,
decompress_request=False,
chunk_size=None, max_header_size=None,
@ -151,7 +153,6 @@ class HTTPServer(TCPServer, Configurable,
max_body_size=None, max_buffer_size=None,
trusted_downstream=None):
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self.protocol = protocol
self.conn_params = HTTP1ConnectionParameters(
@ -162,7 +163,7 @@ class HTTPServer(TCPServer, Configurable,
max_body_size=max_body_size,
body_timeout=body_timeout,
no_keep_alive=no_keep_alive)
TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
TCPServer.__init__(self, ssl_options=ssl_options,
max_buffer_size=max_buffer_size,
read_chunk_size=chunk_size)
self._connections = set()
@ -285,6 +286,10 @@ class _HTTPRequestContext(object):
proto_header = headers.get(
"X-Scheme", headers.get("X-Forwarded-Proto",
self.protocol))
if proto_header:
# use only the last proto entry if there is more than one
# TODO: support trusting multiple layers of proxied protocol
proto_header = proto_header.split(',')[-1].strip()
if proto_header in ("http", "https"):
self.protocol = proto_header
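An illustration of the last-entry rule with a made-up header value::

    proto_header = "https, http"                 # two proxy hops
    assert proto_header.split(',')[-1].strip() == "http"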

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -61,7 +60,7 @@ except ImportError:
SSLError = _SSLError # type: ignore
try:
import typing
import typing # noqa: F401
except ImportError:
pass
@ -184,11 +183,16 @@ class HTTPHeaders(collections.MutableMapping):
"""
if line[0].isspace():
# continuation of a multi-line header
if self._last_key is None:
raise HTTPInputError("first header line cannot start with whitespace")
new_part = ' ' + line.lstrip()
self._as_list[self._last_key][-1] += new_part
self._dict[self._last_key] += new_part
else:
name, value = line.split(":", 1)
try:
name, value = line.split(":", 1)
except ValueError:
raise HTTPInputError("no colon in header line")
self.add(name, value.strip())
@classmethod
@ -198,6 +202,12 @@ class HTTPHeaders(collections.MutableMapping):
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
>>> sorted(h.items())
[('Content-Length', '42'), ('Content-Type', 'text/html')]
.. versionchanged:: 5.1
Raises `HTTPInputError` on malformed headers instead of a
mix of `KeyError` and `ValueError`.
"""
h = cls()
for line in _CRLF_RE.split(headers):
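A quick sketch of the stricter parsing (exception message is indicative only)::

    from tornado.httputil import HTTPHeaders, HTTPInputError

    try:
        HTTPHeaders.parse("Content-Type text/html\r\n")   # no colon
    except HTTPInputError:
        pass                                              # raised in 5.1+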
@ -467,8 +477,7 @@ class HTTPServerRequest(object):
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, dict(self.headers))
return "%s(%s)" % (self.__class__.__name__, args)
class HTTPInputError(Exception):
@ -829,6 +838,8 @@ def parse_request_start_line(line):
try:
method, path, version = line.split(" ")
except ValueError:
# https://tools.ietf.org/html/rfc7230#section-3.1.1
# invalid request-line SHOULD respond with a 400 (Bad Request)
raise HTTPInputError("Malformed HTTP request line")
if not re.match(r"^HTTP/1\.[0-9]$", version):
raise HTTPInputError(
@ -940,6 +951,16 @@ def split_host_and_port(netloc):
return (host, port)
def qs_to_qsl(qs):
"""Generator converting a result of ``parse_qs`` back to name-value pairs.
.. versionadded:: 5.0
"""
for k, vs in qs.items():
for v in vs:
yield (k, v)
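Example round trip through the new helper (the query string is illustrative)::

    from tornado.httputil import qs_to_qsl
    try:
        from urllib.parse import parse_qs      # Python 3
    except ImportError:
        from urlparse import parse_qs          # Python 2

    qs = parse_qs("a=1&a=2&b=3")
    assert sorted(qs_to_qsl(qs)) == [('a', '1'), ('a', '2'), ('b', '3')]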
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
_nulljoin = ''.join

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -16,14 +15,19 @@
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop.
Typical applications will use a single `IOLoop` object, accessed via
`IOLoop.current` class method. The `IOLoop.start` method (or
equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.
In addition to I/O events, the `IOLoop` can also schedule time-based
events. `IOLoop.add_timeout` is a non-blocking alternative to
`time.sleep`.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function
@ -44,39 +48,48 @@ import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
from tornado.util import (
PY3, Configurable, errno_from_exception, timedelta_to_seconds,
TimeoutError, unicode_type, import_object,
)
try:
import signal
except ImportError:
signal = None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
if PY3:
import _thread as thread
else:
import thread
try:
import asyncio
except ImportError:
asyncio = None
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
On Python 3, `IOLoop` is a wrapper around the `asyncio` event
loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD
and Mac OS X) if they are available, or else we fall back on
select(). If you are implementing a system that needs to handle
thousands of simultaneous connections, you should use a system
that supports either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
@ -84,9 +97,18 @@ class IOLoop(Configurable):
import errno
import functools
import tornado.ioloop
import socket
import tornado.ioloop
from tornado import gen
from tornado.iostream import IOStream
@gen.coroutine
def handle_connection(connection, address):
stream = IOStream(connection)
message = yield stream.read_until_close()
print("message from client:", message.decode().strip())
def connection_ready(sock, fd, events):
while True:
try:
@ -102,7 +124,7 @@ class IOLoop(Configurable):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.bind(("", 8888))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
@ -121,9 +143,26 @@ class IOLoop(Configurable):
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
.. versionchanged:: 5.0
Uses the `asyncio` event loop by default. The
``IOLoop.configure`` method cannot be used on Python 3 except
to redundantly specify the `asyncio` event loop.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
@ -141,54 +180,75 @@ class IOLoop(Configurable):
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
# In Python 2, _current.instance points to the current IOLoop.
_current = threading.local()
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
_ioloop_for_asyncio = dict()
@classmethod
def configure(cls, impl, **kwargs):
if asyncio is not None:
from tornado.platform.asyncio import BaseAsyncIOLoop
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if not issubclass(impl, BaseAsyncIOLoop):
raise RuntimeError(
"only AsyncIOLoop is allowed when asyncio is available")
super(IOLoop, cls).configure(impl, **kwargs)
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
"""Deprecated alias for `IOLoop.current()`.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. In most other cases, it is better to use `current()`
to get the current thread's `IOLoop`.
.. versionchanged:: 5.0
Previously, this method returned a global singleton
`IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
return IOLoop.current()
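A hedged sketch of the replacement pattern for the old cross-thread use of ``instance()`` (names illustrative)::

    import threading
    from tornado.ioloop import IOLoop

    main_loop = IOLoop.current()       # captured explicitly on the main thread

    def notify():
        print("ran on the main loop")
        main_loop.stop()

    def worker():
        # add_callback is the thread-safe way to reach another loop.
        main_loop.add_callback(notify)

    threading.Thread(target=worker).start()
    main_loop.start()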
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
"""Deprecated alias for `make_current()`.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
.. versionchanged:: 5.0
When using an `IOLoop` subclass, `install` must be called prior
to creating any objects that implicitly create their own
`IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`).
Previously, this method would set this `IOLoop` as the
global singleton used by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
"""
assert not IOLoop.initialized()
IOLoop._instance = self
self.make_current()
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
"""Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
`clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
IOLoop.clear_current()
@staticmethod
def current(instance=True):
@ -196,22 +256,42 @@ class IOLoop(Configurable):
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
On Python 3, control of the current `IOLoop` is delegated
to `asyncio`, with this and other methods as pass-through accessors.
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method). ``instance=False`` is deprecated,
since even if we do not create an `IOLoop`, this method
may initialize the asyncio loop.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
if asyncio is None:
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
current = IOLoop()
if IOLoop._current.instance is not current:
raise RuntimeError("new IOLoop did not become current")
else:
try:
loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
if not instance:
return None
raise
try:
return IOLoop._ioloop_for_asyncio[loop]
except KeyError:
if instance:
from tornado.platform.asyncio import AsyncIOMainLoop
current = AsyncIOMainLoop(make_current=True)
else:
current = None
return current
def make_current(self):
@ -226,12 +306,38 @@ class IOLoop(Configurable):
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
.. versionchanged:: 5.0
This method also sets the current `asyncio` event loop.
"""
# The asyncio event loops override this method.
assert asyncio is None
old = getattr(IOLoop._current, "instance", None)
if old is not None:
old.clear_current()
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
"""Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
.. versionchanged:: 5.0
This method also clears the current `asyncio` event loop.
"""
old = IOLoop.current(instance=False)
if old is not None:
old._clear_current_hook()
if asyncio is None:
IOLoop._current.instance = None
def _clear_current_hook(self):
"""Instance method called when an IOLoop ceases to be current.
May be overridden by subclasses as a counterpart to make_current.
"""
pass
@classmethod
def configurable_base(cls):
@ -239,22 +345,19 @@ class IOLoop(Configurable):
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
if asyncio is not None:
from tornado.platform.asyncio import AsyncIOLoop
return AsyncIOLoop
return PollIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
if IOLoop.current(instance=False) is not None:
current = IOLoop.current(instance=False)
# AsyncIO loops can already be current by this point.
if current is not None and current is not self:
raise RuntimeError("current IOLoop already exists")
self.make_current()
@ -333,6 +436,11 @@ class IOLoop(Configurable):
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
raise NotImplementedError()
@ -342,6 +450,11 @@ class IOLoop(Configurable):
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
@ -414,7 +527,7 @@ class IOLoop(Configurable):
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
a `tornado.util.TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@ -428,6 +541,9 @@ class IOLoop(Configurable):
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
"""
future_cell = [None]
@ -438,22 +554,29 @@ class IOLoop(Configurable):
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
future_cell[0] = Future()
future_set_exc_info(future_cell[0], sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0] = Future()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
def timeout_callback():
# If we can cancel the future, do so and wait on it. If not,
# just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
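Usage sketch for the cancellation behavior noted in the docstring (the coroutine body is illustrative)::

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.util import TimeoutError

    @gen.coroutine
    def slow():
        yield gen.sleep(10)

    try:
        IOLoop.current().run_sync(slow, timeout=0.1)
    except TimeoutError:
        print("slow() was cancelled")   # new in 5.0: the coroutine is cancelled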
@ -593,8 +716,39 @@ class IOLoop(Configurable):
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
future_add_done_callback(
future, lambda future: self.add_callback(callback, future))
def run_in_executor(self, executor, func, *args):
"""Runs a function in a ``concurrent.futures.Executor``. If
``executor`` is ``None``, the IO loop's default executor will be used.
Use `functools.partial` to pass keyword arguments to ``func``.
.. versionadded:: 5.0
"""
if ThreadPoolExecutor is None:
raise RuntimeError(
"concurrent.futures is required to use IOLoop.run_in_executor")
if executor is None:
if not hasattr(self, '_executor'):
from tornado.process import cpu_count
self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
executor = self._executor
c_future = executor.submit(func, *args)
# Concurrent Futures are not usable with await. Wrap this in a
# Tornado Future instead, using self.add_future for thread-safety.
t_future = Future()
self.add_future(c_future, lambda f: chain_future(f, t_future))
return t_future
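A short usage sketch, assuming ``concurrent.futures`` is available (the blocking function is hypothetical)::

    import time
    from tornado import gen
    from tornado.ioloop import IOLoop

    def blocking_io():                  # stands in for real blocking work
        time.sleep(0.1)
        return "done"

    @gen.coroutine
    def main():
        result = yield IOLoop.current().run_in_executor(None, blocking_io)
        raise gen.Return(result)

    print(IOLoop.current().run_sync(main))   # "done"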
def set_default_executor(self, executor):
"""Sets the default executor to use with :meth:`run_in_executor`.
.. versionadded:: 5.0
"""
self._executor = executor
def _run_callback(self, callback):
"""Runs a callback with error handling.
@ -701,6 +855,7 @@ class PollIOLoop(IOLoop):
self._stopped = False
self._closing = False
self._thread_ident = None
self._pid = os.getpid()
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
@ -711,6 +866,22 @@ class PollIOLoop(IOLoop):
lambda fd, events: self._waker.consume(),
self.READ)
@classmethod
def configurable_base(cls):
return PollIOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def close(self, all_fds=False):
self._closing = True
self.remove_handler(self._waker.fileno())
@ -721,6 +892,8 @@ class PollIOLoop(IOLoop):
self._impl.close()
self._callbacks = None
self._timeouts = None
if hasattr(self, '_executor'):
self._executor.shutdown()
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
@ -753,12 +926,15 @@ class PollIOLoop(IOLoop):
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
if os.getpid() != self._pid:
raise RuntimeError("Cannot share PollIOLoops across processes")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
old_current = IOLoop.current(instance=False)
if old_current is not self:
self.make_current()
self._thread_ident = thread.get_ident()
self._running = True
@ -901,7 +1077,10 @@ class PollIOLoop(IOLoop):
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_current is None:
IOLoop.clear_current()
elif old_current is not self:
old_current.make_current()
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
@ -987,20 +1166,23 @@ class PeriodicCallback(object):
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, callback, callback_time, io_loop=None):
def __init__(self, callback, callback_time):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
# Looking up the IOLoop here allows the PeriodicCallback to be
# instantiated in one thread and then started from another via
# IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
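Usage sketch of the new lookup-at-start behavior (interval chosen arbitrarily)::

    from tornado.ioloop import IOLoop, PeriodicCallback

    def tick():
        print("tick")

    pc = PeriodicCallback(tick, 500)          # may be created on any thread
    IOLoop.current().add_callback(pc.start)   # binds to the loop that runs it
    # IOLoop.current().start() would then fire tick() every 500ms.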

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -28,16 +27,17 @@ from __future__ import absolute_import, division, print_function
import collections
import errno
import io
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado.concurrent import Future
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError, _client_ssl_defaults, _server_ssl_defaults
from tornado.netutil import ssl_wrap_socket, _client_ssl_defaults, _server_ssl_defaults
from tornado import stack_context
from tornado.util import errno_from_exception
@ -66,7 +66,7 @@ _ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore # noqa: E501
if sys.platform == 'darwin':
# OSX appears to have a race condition that causes send(2) to return
@ -117,6 +117,96 @@ class StreamBufferFullError(Exception):
"""
class _StreamBuffer(object):
"""
A specialized buffer that tries to avoid copies when large pieces
of data are encountered.
"""
def __init__(self):
# A sequence of (False, bytearray) and (True, memoryview) objects
self._buffers = collections.deque()
# Position in the first buffer
self._first_pos = 0
self._size = 0
def __len__(self):
return self._size
# Data above this size will be appended separately instead
# of extending an existing bytearray
_large_buf_threshold = 2048
def append(self, data):
"""
Append the given piece of data (should be a buffer-compatible object).
"""
size = len(data)
if size > self._large_buf_threshold:
if not isinstance(data, memoryview):
data = memoryview(data)
self._buffers.append((True, data))
elif size > 0:
if self._buffers:
is_memview, b = self._buffers[-1]
new_buf = is_memview or len(b) >= self._large_buf_threshold
else:
new_buf = True
if new_buf:
self._buffers.append((False, bytearray(data)))
else:
b += data
self._size += size
def peek(self, size):
"""
Get a view over at most ``size`` bytes (possibly fewer) at the
current buffer position.
"""
assert size > 0
try:
is_memview, b = self._buffers[0]
except IndexError:
return memoryview(b'')
pos = self._first_pos
if is_memview:
return b[pos:pos + size]
else:
return memoryview(b)[pos:pos + size]
def advance(self, size):
"""
Advance the current buffer position by ``size`` bytes.
"""
assert 0 < size <= self._size
self._size -= size
pos = self._first_pos
buffers = self._buffers
while buffers and size > 0:
is_large, b = buffers[0]
b_remain = len(b) - size - pos
if b_remain <= 0:
buffers.popleft()
size -= len(b) - pos
pos = 0
elif is_large:
pos += size
size = 0
else:
# Amortized O(1) shrink for Python 2
pos += size
if len(b) <= 2 * pos:
del b[:pos]
pos = 0
size = 0
assert size == 0
self._first_pos = pos
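A small exercise of the buffer's contract (``_StreamBuffer`` is a private helper; shown only to illustrate the semantics above)::

    from tornado.iostream import _StreamBuffer

    sb = _StreamBuffer()
    sb.append(b"hello ")
    sb.append(b"x" * 4096)          # above the threshold: kept as a memoryview
    assert len(sb) == 6 + 4096
    assert bytes(sb.peek(6)) == b"hello "
    sb.advance(6)
    assert len(sb) == 4096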
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
@ -135,12 +225,10 @@ class BaseIOStream(object):
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
def __init__(self, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
Deprecated since Tornado 4.1.
:arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the
@ -151,8 +239,11 @@ class BaseIOStream(object):
.. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been
removed.
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
self.io_loop = ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause
# spurious failures.
@ -163,13 +254,11 @@ class BaseIOStream(object):
self._read_buffer = bytearray()
self._read_buffer_pos = 0
self._read_buffer_size = 0
self._write_buffer = bytearray()
self._write_buffer_pos = 0
self._write_buffer_size = 0
self._write_buffer_frozen = False
self._user_read_buffer = False
self._after_user_read_buffer = None
self._write_buffer = _StreamBuffer()
self._total_write_index = 0
self._total_write_done_index = 0
self._pending_writes_while_frozen = []
self._read_delimiter = None
self._read_regex = None
self._read_max_bytes = None
@ -213,13 +302,18 @@ class BaseIOStream(object):
"""
raise NotImplementedError()
def read_from_fd(self):
def read_from_fd(self, buf):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
Reads up to ``len(buf)`` bytes, storing them in the buffer.
Returns the number of bytes read. Returns None if there was
nothing to read (the socket returned `~errno.EWOULDBLOCK` or
equivalent), and zero on EOF.
.. versionchanged:: 5.0
Interface redesigned to take a buffer and return a number
of bytes instead of a freshly-allocated object.
"""
raise NotImplementedError()
@ -257,7 +351,7 @@ class BaseIOStream(object):
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
self.close(exc_info=e)
return future
except:
if future is not None:
@ -290,7 +384,7 @@ class BaseIOStream(object):
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
self.close(exc_info=e)
return future
except:
if future is not None:
@ -328,6 +422,50 @@ class BaseIOStream(object):
raise
return future
def read_into(self, buf, callback=None, partial=False):
"""Asynchronously read a number of bytes.
``buf`` must be a writable buffer into which data will be read.
If a callback is given, it will be run with the number of read
bytes as an argument; if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as any bytes
have been read. Otherwise, it is run when the ``buf`` has been
entirely filled with read data.
.. versionadded:: 5.0
"""
future = self._set_read_callback(callback)
# First copy data already in read buffer
available_bytes = self._read_buffer_size
n = len(buf)
if available_bytes >= n:
end = self._read_buffer_pos + n
buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos:end]
del self._read_buffer[:end]
self._after_user_read_buffer = self._read_buffer
elif available_bytes > 0:
buf[:available_bytes] = memoryview(self._read_buffer)[self._read_buffer_pos:]
# Set up the supplied buffer as our temporary read buffer.
# The original (if it had any data remaining) has been
# saved for later.
self._user_read_buffer = True
self._read_buffer = buf
self._read_buffer_pos = 0
self._read_buffer_size = available_bytes
self._read_bytes = n
self._read_partial = partial
try:
self._try_inline_read()
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
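A coroutine-style sketch of the new method (connection setup omitted; ``stream`` is assumed to be a connected `IOStream`)::

    from tornado import gen

    @gen.coroutine
    def read_header(stream):
        buf = bytearray(16)                # preallocated and reusable
        n = yield stream.read_into(buf, partial=True)
        raise gen.Return(bytes(buf[:n]))   # only the filled prefix is valid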
def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed.
@ -387,24 +525,20 @@ class BaseIOStream(object):
self._check_closed()
if data:
if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size):
len(self._write_buffer) + len(data) > self.max_write_buffer_size):
raise StreamBufferFullError("Reached maximum write buffer size")
if self._write_buffer_frozen:
self._pending_writes_while_frozen.append(data)
else:
self._write_buffer += data
self._write_buffer_size += len(data)
self._write_buffer.append(data)
self._total_write_index += len(data)
if callback is not None:
self._write_callback = stack_context.wrap(callback)
future = None
else:
future = TracebackFuture()
future = Future()
future.add_done_callback(lambda f: f.exception())
self._write_futures.append((self._total_write_index, future))
if not self._connecting:
self._handle_write()
if self._write_buffer_size:
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future
@ -428,10 +562,14 @@ class BaseIOStream(object):
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
if isinstance(exc_info, tuple):
self.error = exc_info[1]
elif isinstance(exc_info, BaseException):
self.error = exc_info
else:
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
@ -463,6 +601,7 @@ class BaseIOStream(object):
self._ssl_connect_future = None
for future in futures:
future.set_exception(StreamClosedError(real_error=self.error))
future.exception()
if self._close_callback is not None:
cb = self._close_callback
self._close_callback = None
@ -473,7 +612,6 @@ class BaseIOStream(object):
# if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None
self._write_buffer_size = 0
def reading(self):
"""Returns true if we are currently reading from the stream."""
@ -481,7 +619,7 @@ class BaseIOStream(object):
def writing(self):
"""Returns true if we are currently writing to the stream."""
return self._write_buffer_size > 0
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
@ -548,11 +686,11 @@ class BaseIOStream(object):
self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
except Exception:
self.close(exc_info=e)
except Exception as e:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
self.close(exc_info=e)
raise
def _run_callback(self, callback, *args):
@ -560,14 +698,14 @@ class BaseIOStream(object):
self._pending_callbacks -= 1
try:
return callback(*args)
except Exception:
except Exception as e:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
self.close(exc_info=e)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
@ -672,10 +810,19 @@ class BaseIOStream(object):
if callback is not None:
self._read_callback = stack_context.wrap(callback)
else:
self._read_future = TracebackFuture()
self._read_future = Future()
return self._read_future
def _run_read_callback(self, size, streaming):
if self._user_read_buffer:
self._read_buffer = self._after_user_read_buffer or bytearray()
self._after_user_read_buffer = None
self._read_buffer_pos = 0
self._read_buffer_size = len(self._read_buffer)
self._user_read_buffer = False
result = size
else:
result = self._consume(size)
if streaming:
callback = self._streaming_callback
else:
@ -685,10 +832,11 @@ class BaseIOStream(object):
assert callback is None
future = self._read_future
self._read_future = None
future.set_result(self._consume(size))
future.set_result(result)
if callback is not None:
assert (self._read_future is None) or streaming
self._run_callback(callback, self._consume(size))
self._run_callback(callback, result)
else:
# If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now.
@ -734,31 +882,44 @@ class BaseIOStream(object):
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
while True:
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
if errno_from_exception(e) == errno.EINTR:
continue
# ssl.SSLError is a subclass of socket.error
if self._is_connreset(e):
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
break
if chunk is None:
return 0
self._read_buffer += chunk
self._read_buffer_size += len(chunk)
try:
while True:
try:
if self._user_read_buffer:
buf = memoryview(self._read_buffer)[self._read_buffer_size:]
else:
buf = bytearray(self.read_chunk_size)
bytes_read = self.read_from_fd(buf)
except (socket.error, IOError, OSError) as e:
if errno_from_exception(e) == errno.EINTR:
continue
# ssl.SSLError is a subclass of socket.error
if self._is_connreset(e):
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=e)
return
self.close(exc_info=e)
raise
break
if bytes_read is None:
return 0
elif bytes_read == 0:
self.close()
return 0
if not self._user_read_buffer:
self._read_buffer += memoryview(buf)[:bytes_read]
self._read_buffer_size += bytes_read
finally:
# Break the reference to buf so we don't waste a chunk's worth of
# memory in case an exception hangs on to our stack frame.
buf = None
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk)
return bytes_read
def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size:
@ -828,56 +989,28 @@ class BaseIOStream(object):
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _freeze_write_buffer(self, size):
self._write_buffer_frozen = size
def _unfreeze_write_buffer(self):
self._write_buffer_frozen = False
self._write_buffer += b''.join(self._pending_writes_while_frozen)
self._write_buffer_size += sum(map(len, self._pending_writes_while_frozen))
self._pending_writes_while_frozen[:] = []
def _got_empty_write(self, size):
"""
Called when a non-blocking write() failed writing anything.
Can be overridden in subclasses.
"""
def _handle_write(self):
while self._write_buffer_size:
assert self._write_buffer_size >= 0
while True:
size = len(self._write_buffer)
if not size:
break
assert size > 0
try:
start = self._write_buffer_pos
if self._write_buffer_frozen:
size = self._write_buffer_frozen
elif _WINDOWS:
if _WINDOWS:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
size = 128 * 1024
else:
size = self._write_buffer_size
num_bytes = self.write_to_fd(
memoryview(self._write_buffer)[start:start + size])
num_bytes = self.write_to_fd(self._write_buffer.peek(size))
if num_bytes == 0:
self._got_empty_write(size)
break
self._write_buffer_pos += num_bytes
self._write_buffer_size -= num_bytes
# Amortized O(1) shrink
# (this heuristic is implemented natively in Python 3.4+
# but is replicated here for Python 2)
if self._write_buffer_pos > self._write_buffer_size:
del self._write_buffer[:self._write_buffer_pos]
self._write_buffer_pos = 0
if self._write_buffer_frozen:
self._unfreeze_write_buffer()
self._write_buffer.advance(num_bytes)
self._total_write_done_index += num_bytes
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
self._got_empty_write(size)
break
else:
if not self._is_connreset(e):
@ -886,7 +1019,7 @@ class BaseIOStream(object):
# minimize log spam
gen_log.warning("Write error on %s: %s",
self.fileno(), e)
self.close(exc_info=True)
self.close(exc_info=e)
return
while self._write_futures:
@ -896,7 +1029,7 @@ class BaseIOStream(object):
self._write_futures.popleft()
future.set_result(None)
if not self._write_buffer_size:
if not len(self._write_buffer):
if self._write_callback:
callback = self._write_callback
self._write_callback = None
@ -1048,21 +1181,24 @@ class IOStream(BaseIOStream):
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
def read_from_fd(self, buf):
try:
chunk = self.socket.recv(self.read_chunk_size)
return self.socket.recv_into(buf)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
finally:
buf = None
def write_to_fd(self, data):
return self.socket.send(data)
try:
return self.socket.send(data)
finally:
# Avoid keeping a reference to data, which can be a memoryview.
# See https://github.com/tornadoweb/tornado/pull/2008
del data
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
@ -1108,7 +1244,7 @@ class IOStream(BaseIOStream):
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
future = self._connect_future = Future()
try:
self.socket.connect(address)
except socket.error as e:
@ -1124,7 +1260,7 @@ class IOStream(BaseIOStream):
if future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
self.close(exc_info=e)
return future
self._add_io_state(self.io_loop.WRITE)
return future
@ -1186,9 +1322,8 @@ class IOStream(BaseIOStream):
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
future = Future()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
@ -1292,17 +1427,6 @@ class SSLIOStream(IOStream):
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _got_empty_write(self, size):
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._freeze_write_buffer(size)
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
@ -1318,7 +1442,7 @@ class SSLIOStream(IOStream):
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
return self.close(exc_info=err)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
@ -1326,7 +1450,7 @@ class SSLIOStream(IOStream):
peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
return self.close(exc_info=err)
raise
except socket.error as err:
# Some port scans (e.g. nmap in -sT mode) have been known
@ -1335,13 +1459,13 @@ class SSLIOStream(IOStream):
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
if (self._is_connreset(err) or
err.args[0] in (errno.EBADF, errno.ENOTCONN)):
return self.close(exc_info=True)
return self.close(exc_info=err)
raise
except AttributeError:
except AttributeError as err:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
return self.close(exc_info=err)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
@ -1379,8 +1503,8 @@ class SSLIOStream(IOStream):
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError as e:
ssl.match_hostname(peercert, self._server_hostname)
except ssl.CertificateError as e:
gen_log.warning("Invalid SSL certificate: %s" % e)
return False
else:
@ -1454,7 +1578,7 @@ class SSLIOStream(IOStream):
self._ssl_connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._ssl_connect_future = TracebackFuture()
future = self._ssl_connect_future = Future()
if not self._ssl_accepting:
self._run_ssl_connect_callback()
return future
@ -1472,36 +1596,34 @@ class SSLIOStream(IOStream):
# simply return 0 bytes written.
return 0
raise
finally:
# Avoid keeping a reference to data, which can be a memoryview.
# See https://github.com/tornadoweb/tornado/pull/2008
del data
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
def read_from_fd(self, buf):
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
try:
return self.socket.recv_into(buf)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
finally:
buf = None
def _is_connreset(self, e):
if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF:
@ -1519,6 +1641,7 @@ class PipeIOStream(BaseIOStream):
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
self._fio = io.FileIO(self.fd, "r+")
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
@ -1526,28 +1649,29 @@ class PipeIOStream(BaseIOStream):
return self.fd
def close_fd(self):
os.close(self.fd)
self._fio.close()
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
return os.write(self.fd, data)
finally:
# Avoid keeping a reference to data, which can be a memoryview.
# See https://github.com/tornadoweb/tornado/pull/2008
del data
def read_from_fd(self, buf):
try:
return self._fio.readinto(buf)
except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return None
elif errno_from_exception(e) == errno.EBADF:
if errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
self.close(exc_info=e)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
finally:
buf = None
def doctests():

View file

@ -1,5 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View file

@ -15,9 +15,10 @@
from __future__ import absolute_import, division, print_function
import collections
from concurrent.futures import CancelledError
from tornado import gen, ioloop
from tornado.concurrent import Future
from tornado.concurrent import Future, future_set_result_unless_cancelled
__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock']
@ -99,8 +100,12 @@ class Condition(_TimeoutGarbageCollector):
# Wait up to 1 second.
yield condition.wait(timeout=datetime.timedelta(seconds=1))
The method raises `tornado.gen.TimeoutError` if there's no notification
before the deadline.
The method returns False if there's no notification before the deadline.
.. versionchanged:: 5.0
Previously, waiters could be notified synchronously from within
`notify`. Now, the notification will always be received on the
next iteration of the `.IOLoop`.
"""
def __init__(self):
@ -123,7 +128,8 @@ class Condition(_TimeoutGarbageCollector):
self._waiters.append(waiter)
if timeout:
def on_timeout():
waiter.set_result(False)
if not waiter.done():
future_set_result_unless_cancelled(waiter, False)
self._garbage_collect()
io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
@ -141,7 +147,7 @@ class Condition(_TimeoutGarbageCollector):
waiters.append(waiter)
for waiter in waiters:
waiter.set_result(True)
future_set_result_unless_cancelled(waiter, True)
def notify_all(self):
"""Wake all waiters."""
@ -191,7 +197,8 @@ class Event(object):
Done
"""
def __init__(self):
self._future = Future()
self._value = False
self._waiters = set()
def __repr__(self):
return '<%s %s>' % (
@ -199,34 +206,48 @@ class Event(object):
def is_set(self):
"""Return ``True`` if the internal flag is true."""
return self._future.done()
return self._value
def set(self):
"""Set the internal flag to ``True``. All waiters are awakened.
Calling `.wait` once the flag is set will not block.
"""
if not self._future.done():
self._future.set_result(None)
if not self._value:
self._value = True
for fut in self._waiters:
if not fut.done():
fut.set_result(None)
def clear(self):
"""Reset the internal flag to ``False``.
Calls to `.wait` will block until `.set` is called.
"""
if self._future.done():
self._future = Future()
self._value = False
def wait(self, timeout=None):
"""Block until the internal flag is true.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout.
"""
fut = Future()
if self._value:
fut.set_result(None)
return fut
self._waiters.add(fut)
fut.add_done_callback(lambda fut: self._waiters.remove(fut))
if timeout is None:
return self._future
return fut
else:
return gen.with_timeout(timeout, self._future)
timeout_fut = gen.with_timeout(timeout, fut, quiet_exceptions=(CancelledError,))
# This is a slightly clumsy workaround for the fact that
# gen.with_timeout doesn't cancel its futures. Cancelling
# fut will remove it from the waiters list.
timeout_fut.add_done_callback(lambda tf: fut.cancel() if not fut.done() else None)
return timeout_fut
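# A short coroutine sketch of the timeout path above; assumes a running
# IOLoop and is illustrative only:
import datetime

from tornado import gen


@gen.coroutine
def _demo_wait(event):
    try:
        yield event.wait(timeout=datetime.timedelta(seconds=1))
    except gen.TimeoutError:
        pass  # no set() within one second; the waiter future was cancelled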
class _ReleasingContextManager(object):
@ -272,6 +293,8 @@ class Semaphore(_TimeoutGarbageCollector):
@gen.coroutine
def simulator(futures):
for f in futures:
# simulate the asynchronous passage of time
yield gen.moment
yield gen.moment
f.set_result(None)
@ -388,7 +411,8 @@ class Semaphore(_TimeoutGarbageCollector):
self._waiters.append(waiter)
if timeout:
def on_timeout():
waiter.set_exception(gen.TimeoutError())
if not waiter.done():
waiter.set_exception(gen.TimeoutError())
self._garbage_collect()
io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
@ -458,7 +482,7 @@ class Lock(object):
``async with`` includes both the ``yield`` and the ``acquire``
(just as it does with `threading.Lock`):
>>> async def f(): # doctest: +SKIP
>>> async def f2(): # doctest: +SKIP
... async with lock:
... # Do something holding the lock.
... pass
@ -480,7 +504,7 @@ class Lock(object):
def acquire(self, timeout=None):
"""Attempt to lock. Returns a Future.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout.
"""
return self._block.acquire(timeout)

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
@ -102,7 +101,8 @@ class LogFormatter(logging.Formatter):
Added support for ``colorama``. Changed the constructor
signature to be compatible with `logging.config.dictConfig`.
"""
DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
DEFAULT_FORMAT = \
'%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
DEFAULT_COLORS = {
logging.DEBUG: 4, # Blue
@ -115,13 +115,13 @@ class LogFormatter(logging.Formatter):
style='%', color=True, colors=DEFAULT_COLORS):
r"""
:arg bool color: Enables color support.
:arg string fmt: Log message format.
:arg str fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg string datefmt: Datetime format.
:arg str datefmt: Datetime format.
Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
.. versionchanged:: 3.2
@ -177,7 +177,7 @@ class LogFormatter(logging.Formatter):
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
# byte strings whereever possible).
# byte strings wherever possible).
record.message = _safe_unicode(message)
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
@ -25,6 +24,7 @@ import socket
import stat
from tornado.concurrent import dummy_executor, run_on_executor
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.util import PY3, Configurable, errno_from_exception
@ -35,54 +35,20 @@ except ImportError:
# ssl is not available on Google App Engine
ssl = None
try:
import certifi
except ImportError:
# certifi is optional as long as we have ssl.create_default_context.
if ssl is None or hasattr(ssl, 'create_default_context'):
certifi = None
else:
raise
if PY3:
xrange = range
if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+
ssl_match_hostname = ssl.match_hostname
SSLCertificateError = ssl.CertificateError
elif ssl is None:
ssl_match_hostname = SSLCertificateError = None # type: ignore
else:
import backports.ssl_match_hostname
ssl_match_hostname = backports.ssl_match_hostname.match_hostname
SSLCertificateError = backports.ssl_match_hostname.CertificateError # type: ignore
if hasattr(ssl, 'SSLContext'):
if hasattr(ssl, 'create_default_context'):
# Python 2.7.9+, 3.4+
# Note that the naming of ssl.Purpose is confusing; the purpose
# of a context is to authenticate the opposite side of the connection.
_client_ssl_defaults = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH)
_server_ssl_defaults = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH)
else:
# Python 3.2-3.3
_client_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
_client_ssl_defaults.verify_mode = ssl.CERT_REQUIRED
_client_ssl_defaults.load_verify_locations(certifi.where())
_server_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# Disable TLS compression to avoid CRIME and related attacks.
# This constant wasn't added until python 3.3.
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
_server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
elif ssl:
# Python 2.6-2.7.8
_client_ssl_defaults = dict(cert_reqs=ssl.CERT_REQUIRED,
ca_certs=certifi.where())
_server_ssl_defaults = {}
if ssl is not None:
# Note that the naming of ssl.Purpose is confusing; the purpose
# of a context is to authenticate the opposite side of the connection.
_client_ssl_defaults = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH)
_server_ssl_defaults = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH)
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# See netutil.ssl_options_to_context
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
_server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
else:
# Google App Engine
_client_ssl_defaults = dict(cert_reqs=None,
@ -232,7 +198,7 @@ if hasattr(socket, 'AF_UNIX'):
return sock
def add_accept_handler(sock, callback, io_loop=None):
def add_accept_handler(sock, callback):
"""Adds an `.IOLoop` event handler to accept new connections on ``sock``.
When a connection is accepted, ``callback(connection, address)`` will
@ -241,11 +207,17 @@ def add_accept_handler(sock, callback, io_loop=None):
is different from the ``callback(fd, events)`` signature used for
`.IOLoop` handlers.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
A callable is returned which, when called, will remove the `.IOLoop`
event handler and stop processing further incoming connections.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.0
A callable is returned (``None`` was returned before).
"""
if io_loop is None:
io_loop = IOLoop.current()
io_loop = IOLoop.current()
removed = [False]
def accept_handler(fd, events):
# More connections may come in while we're handling callbacks;
@ -260,6 +232,9 @@ def add_accept_handler(sock, callback, io_loop=None):
# heuristic for the number of connections we can reasonably
# accept at once.
for i in xrange(_DEFAULT_BACKLOG):
if removed[0]:
# The socket was probably closed
return
try:
connection, address = sock.accept()
except socket.error as e:
@ -273,8 +248,15 @@ def add_accept_handler(sock, callback, io_loop=None):
if errno_from_exception(e) == errno.ECONNABORTED:
continue
raise
set_close_exec(connection.fileno())
callback(connection, address)
def remove_handler():
io_loop.remove_handler(sock)
removed[0] = True
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
return remove_handler
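# Hedged usage sketch of the new remove_handler return value; the socket
# setup here is illustrative and uses this module's bind_sockets:
def _demo_accept_handler():
    sock = bind_sockets(0, address='127.0.0.1')[0]

    def on_connection(connection, address):
        connection.close()

    remove = add_accept_handler(sock, on_connection)
    # Later: stop accepting and detach the handler from the IOLoop.
    remove()
    sock.close()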
def is_valid_ip(ip):
@ -310,11 +292,16 @@ class Resolver(Configurable):
The implementations of this interface included with Tornado are
* `tornado.netutil.BlockingResolver`
* `tornado.netutil.ThreadedResolver`
* `tornado.netutil.DefaultExecutorResolver`
* `tornado.netutil.BlockingResolver` (deprecated)
* `tornado.netutil.ThreadedResolver` (deprecated)
* `tornado.netutil.OverrideResolver`
* `tornado.platform.twisted.TwistedResolver`
* `tornado.platform.caresresolver.CaresResolver`
.. versionchanged:: 5.0
The default implementation has changed from `BlockingResolver` to
`DefaultExecutorResolver`.
"""
@classmethod
def configurable_base(cls):
@ -322,7 +309,7 @@ class Resolver(Configurable):
@classmethod
def configurable_default(cls):
return BlockingResolver
return DefaultExecutorResolver
def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
"""Resolves an address.
@ -341,6 +328,10 @@ class Resolver(Configurable):
.. versionchanged:: 4.4
Standardized all implementations to raise `IOError`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in 6.0.
Use the returned awaitable object instead.
"""
raise NotImplementedError()
@ -353,6 +344,31 @@ class Resolver(Configurable):
pass
def _resolve_addr(host, port, family=socket.AF_UNSPEC):
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for family, socktype, proto, canonname, address in addrinfo:
results.append((family, address))
return results
class DefaultExecutorResolver(Resolver):
"""Resolver implementation using `.IOLoop.run_in_executor`.
.. versionadded:: 5.0
"""
@gen.coroutine
def resolve(self, host, port, family=socket.AF_UNSPEC):
result = yield IOLoop.current().run_in_executor(
None, _resolve_addr, host, port, family)
raise gen.Return(result)
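# Sketch of resolving through the new default implementation; assumes a
# running IOLoop:
@gen.coroutine
def _demo_resolve():
    resolver = Resolver()  # DefaultExecutorResolver as of 5.0
    addrinfo = yield resolver.resolve('localhost', 80)
    # addrinfo is a list of (family, address) tuples.
    raise gen.Return(addrinfo)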
class ExecutorResolver(Resolver):
"""Resolver implementation using a `concurrent.futures.Executor`.
@ -363,11 +379,15 @@ class ExecutorResolver(Resolver):
``close_resolver=False``; use this if you want to reuse the same
executor elsewhere.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
"""
def initialize(self, io_loop=None, executor=None, close_executor=True):
self.io_loop = io_loop or IOLoop.current()
def initialize(self, executor=None, close_executor=True):
self.io_loop = IOLoop.current()
if executor is not None:
self.executor = executor
self.close_executor = close_executor
@ -382,16 +402,7 @@ class ExecutorResolver(Resolver):
@run_on_executor
def resolve(self, host, port, family=socket.AF_UNSPEC):
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for family, socktype, proto, canonname, address in addrinfo:
results.append((family, address))
return results
return _resolve_addr(host, port, family)
class BlockingResolver(ExecutorResolver):
@ -399,9 +410,13 @@ class BlockingResolver(ExecutorResolver):
The `.IOLoop` will be blocked during the resolution, although the
callback will not be run until the next `.IOLoop` iteration.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
"""
def initialize(self, io_loop=None):
super(BlockingResolver, self).initialize(io_loop=io_loop)
def initialize(self):
super(BlockingResolver, self).initialize()
class ThreadedResolver(ExecutorResolver):
@ -419,14 +434,18 @@ class ThreadedResolver(ExecutorResolver):
.. versionchanged:: 3.1
All ``ThreadedResolvers`` share a single thread pool, whose
size is set by the first one to be created.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
"""
_threadpool = None # type: ignore
_threadpool_pid = None # type: int
def initialize(self, io_loop=None, num_threads=10):
def initialize(self, num_threads=10):
threadpool = ThreadedResolver._create_threadpool(num_threads)
super(ThreadedResolver, self).initialize(
io_loop=io_loop, executor=threadpool, close_executor=False)
executor=threadpool, close_executor=False)
@classmethod
def _create_threadpool(cls, num_threads):
@ -448,7 +467,21 @@ class OverrideResolver(Resolver):
This can be used to make local DNS changes (e.g. for testing)
without modifying system-wide settings.
The mapping can contain either host strings or host-port pairs.
The mapping can be in three formats::
{
# Hostname to host or ip
"example.com": "127.0.1.1",
# Host+port to host+port
("login.example.com", 443): ("localhost", 1443),
# Host+port+address family to host+port
("login.example.com", 443, socket.AF_INET6): ("::1", 1443),
}
.. versionchanged:: 5.0
Added support for host-port-family triplets.
"""
def initialize(self, resolver, mapping):
self.resolver = resolver
@ -457,12 +490,14 @@ class OverrideResolver(Resolver):
def close(self):
self.resolver.close()
def resolve(self, host, port, *args, **kwargs):
if (host, port) in self.mapping:
def resolve(self, host, port, family=socket.AF_UNSPEC, *args, **kwargs):
if (host, port, family) in self.mapping:
host, port = self.mapping[(host, port, family)]
elif (host, port) in self.mapping:
host, port = self.mapping[(host, port)]
elif host in self.mapping:
host = self.mapping[host]
return self.resolver.resolve(host, port, *args, **kwargs)
return self.resolver.resolve(host, port, family, *args, **kwargs)
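# The three mapping formats from the docstring, combined in one hedged
# example:
def _demo_override_resolver():
    return OverrideResolver(
        resolver=Resolver(),
        mapping={
            'example.com': '127.0.1.1',
            ('login.example.com', 443): ('localhost', 1443),
            ('login.example.com', 443, socket.AF_INET6): ('::1', 1443),
        })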
# These are the keyword arguments to ssl.wrap_socket that must be translated
@ -483,11 +518,12 @@ def ssl_options_to_context(ssl_options):
accepts both forms needs to upgrade to the `~ssl.SSLContext` version
to use features like SNI or NPN.
"""
if isinstance(ssl_options, dict):
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
if (not hasattr(ssl, 'SSLContext') or
isinstance(ssl_options, ssl.SSLContext)):
if isinstance(ssl_options, ssl.SSLContext):
return ssl_options
assert isinstance(ssl_options, dict)
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
# Can't use create_default_context since this interface doesn't
# tell us client vs server.
context = ssl.SSLContext(
ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
if 'certfile' in ssl_options:
@ -500,7 +536,9 @@ def ssl_options_to_context(ssl_options):
context.set_ciphers(ssl_options['ciphers'])
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# Disable TLS compression to avoid CRIME and related attacks.
# This constant wasn't added until python 3.3.
# This constant depends on openssl version 1.0.
# TODO: Do we need to do this ourselves or can we trust
# the defaults?
context.options |= ssl.OP_NO_COMPRESSION
return context
@ -515,14 +553,13 @@ def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
appropriate).
"""
context = ssl_options_to_context(ssl_options)
if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext):
if server_hostname is not None and getattr(ssl, 'HAS_SNI'):
# Python doesn't have server-side SNI support so we can't
# really unittest this, but it can be manually tested with
# python3.2 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname,
**kwargs)
else:
return context.wrap_socket(socket, **kwargs)
if ssl.HAS_SNI:
# In python 3.4, wrap_socket only accepts the server_hostname
# argument if HAS_SNI is true.
# TODO: add a unittest (python added server-side SNI support in 3.4)
# In the meantime it can be manually tested with
# python3 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname,
**kwargs)
else:
return ssl.wrap_socket(socket, **dict(context, **kwargs)) # type: ignore
return context.wrap_socket(socket, **kwargs)
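# Sketch of the dict form accepted by ssl_options_to_context; the
# certificate paths are placeholders supplied by the caller:
def _demo_ssl_context(certfile, keyfile):
    return ssl_options_to_context({
        'certfile': certfile,
        'keyfile': keyfile,
    })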

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -16,9 +15,19 @@
"""A command line parsing module that lets modules define their own options.
Each module defines its own options which are added to the global
option namespace, e.g.::
This module is inspired by Google's `gflags
<https://github.com/google/python-gflags>`_. The primary difference
with libraries such as `argparse` is that a global registry is used so
that options may be defined in any module (it also enables
`tornado.log` by default). The rest of Tornado does not depend on this
module, so feel free to use `argparse` or other configuration
libraries if you prefer them.
Options must be defined with `tornado.options.define` before use,
generally at the top level of a module. The options are then
accessible as attributes of `tornado.options.options`::
# myapp/db.py
from tornado.options import define, options
define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
@ -29,34 +38,36 @@ option namespace, e.g.::
db = database.Connection(options.mysql_host)
...
# myapp/server.py
from tornado.options import define, options
define("port", default=8080, help="port to listen on")
def start_server():
app = make_app()
app.listen(options.port)
The ``main()`` method of your application does not need to be aware of all of
the options used throughout your program; they are all automatically loaded
when the modules are loaded. However, all modules that define options
must have been imported before the command line is parsed.
Your ``main()`` method can parse the command line or parse a config file with
either::
either `parse_command_line` or `parse_config_file`::
tornado.options.parse_command_line()
# or
tornado.options.parse_config_file("/etc/server.conf")
import myapp.db, myapp.server
import tornado.options
.. note:
if __name__ == '__main__':
tornado.options.parse_command_line()
# or
tornado.options.parse_config_file("/etc/server.conf")
When using tornado.options.parse_command_line or
tornado.options.parse_config_file, the only options that are set are
ones that were previously defined with tornado.options.define.
.. note::
Command line formats are what you would expect (``--myoption=myvalue``).
Config files are just Python files. Global names become options, e.g.::
myoption = "myvalue"
myotheroption = "myothervalue"
We support `datetimes <datetime.datetime>`, `timedeltas
<datetime.timedelta>`, ints, and floats (just pass a ``type`` kwarg to
`define`). We also accept multi-value options. See the documentation for
`define()` below.
When using multiple ``parse_*`` functions, pass ``final=False`` to all
but the last one, or side effects may occur twice (in particular,
this can result in log messages being doubled).
`tornado.options.options` is a singleton instance of `OptionParser`, and
the top-level functions in this module (`define`, `parse_command_line`, etc)
@ -80,6 +91,7 @@ instances to define isolated sets of options, such as for subcommands.
options can be defined, set, and read with any mix of the two.
Dashes are typical for command-line usage while config files require
underscores.
"""
from __future__ import absolute_import, division, print_function
@ -190,13 +202,13 @@ class OptionParser(object):
multiple=False, group=None, callback=None):
"""Defines a new command line option.
If ``type`` is given (one of str, float, int, datetime, or timedelta)
or can be inferred from the ``default``, we parse the command line
arguments based on the given type. If ``multiple`` is True, we accept
comma-separated values, and the option value is always a list.
``type`` can be any of `str`, `int`, `float`, `bool`,
`~datetime.datetime`, or `~datetime.timedelta`. If no ``type``
is given but a ``default`` is, ``type`` is the type of
``default``. Otherwise, ``type`` defaults to `str`.
For multi-value integers, we also accept the syntax ``x:y``, which
turns into ``range(x, y)`` - very useful for long integer ranges.
If ``multiple`` is True, the option value is a list of ``type``
instead of an instance of ``type``.
``help`` and ``metavar`` are used to construct the
automatically generated command line help string. The help
@ -208,9 +220,7 @@ class OptionParser(object):
groups. By default, command line options are grouped by the
file in which they are defined.
Command line option names must be unique globally. They can be parsed
from the command line with `parse_command_line` or parsed from a
config file with `parse_config_file`.
Command line option names must be unique globally.
If a ``callback`` is given, it will be run with the new value whenever
the option is changed. This can be used to combine command-line
@ -222,10 +232,12 @@ class OptionParser(object):
With this definition, options in the file specified by ``--config`` will
override options set earlier on the command line, but can be overridden
by later flags.
"""
if name in self._options:
normalized = self._normalize_name(name)
if normalized in self._options:
raise Error("Option %r already defined in %s" %
(name, self._options[name].file_name))
(normalized, self._options[normalized].file_name))
frame = sys._getframe(0)
options_file = frame.f_code.co_filename
@ -247,7 +259,6 @@ class OptionParser(object):
group_name = group
else:
group_name = file_name
normalized = self._normalize_name(name)
option = _Option(name, file_name=file_name,
default=default, type=type, help=help,
metavar=metavar, multiple=multiple,
@ -259,6 +270,14 @@ class OptionParser(object):
"""Parses all options given on the command line (defaults to
`sys.argv`).
Options look like ``--option=value`` and are parsed according
to their ``type``. For boolean options, ``--option`` is
equivalent to ``--option=true``
If the option has ``multiple=True``, comma-separated values
are accepted. For multi-value integer options, the syntax
``x:y`` is also accepted and equivalent to ``range(x, y)``.
Note that ``args[0]`` is ignored since it is the program name
in `sys.argv`.
@ -267,6 +286,7 @@ class OptionParser(object):
If ``final`` is ``False``, parse callbacks will not be run.
This is useful for applications that wish to combine configurations
from multiple sources.
"""
if args is None:
args = sys.argv
@ -299,12 +319,37 @@ class OptionParser(object):
return remaining
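# A hedged sketch of the --config callback pattern described in define()
# above; the option names are illustrative:
def _demo_define_config():
    define('demo_port', default=8080, help='port to listen on')
    define('demo_config', type=str,
           callback=lambda path: parse_config_file(path, final=False))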
def parse_config_file(self, path, final=True):
"""Parses and loads the Python config file at the given path.
"""Parses and loads the config file at the given path.
The config file contains Python code that will be executed (so
it is **not safe** to use untrusted config files). Anything in
the global namespace that matches a defined option will be
used to set that option's value.
Options are not parsed from strings as they would be on the
command line; they should be set to the correct type (this
means if you have ``datetime`` or ``timedelta`` options you
will need to import those modules in the config file).
Example (using the options defined in the top-level docs of
this module)::
port = 80
mysql_host = 'mydb.example.com:3306'
memcache_hosts = ['cache1.example.com:11011',
'cache2.example.com:11011']
If ``final`` is ``False``, parse callbacks will not be run.
This is useful for applications that wish to combine configurations
from multiple sources.
.. note::
`tornado.options` is primarily a command-line library.
Config file support is provided for applications that wish
to use it, but applications that prefer config files may
wish to look at other libraries instead.
.. versionchanged:: 4.1
Config files are now always interpreted as utf-8 instead of
the system default encoding.
@ -312,6 +357,7 @@ class OptionParser(object):
.. versionchanged:: 4.4
The special variable ``__file__`` is available inside config
files, specifying the absolute path to the config file itself.
"""
config = {'__file__': os.path.abspath(path)}
with open(path, 'rb') as f:

View file

@ -3,14 +3,14 @@
.. versionadded:: 3.2
This module integrates Tornado with the ``asyncio`` module introduced
in Python 3.4 (and available `as a separate download
<https://pypi.python.org/pypi/asyncio>`_ for Python 3.3). This makes
it possible to combine the two libraries on the same event loop.
in Python 3.4. This makes it possible to combine the two libraries on
the same event loop.
Most applications should use `AsyncIOMainLoop` to run Tornado on the
default ``asyncio`` event loop. Applications that need to run event
loops on multiple threads may use `AsyncIOLoop` to create multiple
loops.
.. deprecated:: 5.0
While the code in this module is still used, it is now enabled
automatically when `asyncio` is available, so applications should
no longer need to refer to this module directly.
.. note::
@ -22,35 +22,38 @@ loops.
from __future__ import absolute_import, division, print_function
import functools
import tornado.concurrent
from tornado.gen import convert_yielded
from tornado.ioloop import IOLoop
from tornado import stack_context
try:
# Import the real asyncio module for py33+ first. Older versions of the
# trollius backport also use this name.
import asyncio # type: ignore
except ImportError as e:
# Asyncio itself isn't available; see if trollius is (backport to py26+).
try:
import trollius as asyncio # type: ignore
except ImportError:
# Re-raise the original asyncio error, not the trollius one.
raise e
import asyncio
class BaseAsyncIOLoop(IOLoop):
def initialize(self, asyncio_loop, close_loop=False, **kwargs):
super(BaseAsyncIOLoop, self).initialize(**kwargs)
def initialize(self, asyncio_loop, **kwargs):
self.asyncio_loop = asyncio_loop
self.close_loop = close_loop
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {}
# Set of fds listening for reads/writes
self.readers = set()
self.writers = set()
self.closing = False
# If an asyncio loop was closed through an asyncio interface
# instead of IOLoop.close(), we'd never hear about it and may
# have left a dangling reference in our map. In case an
# application (or, more likely, a test suite) creates and
# destroys a lot of event loops in this way, check here to
# ensure that we don't have a lot of dead loops building up in
# the map.
#
# TODO(bdarnell): consider making self.asyncio_loop a weakref
# for AsyncIOMainLoop and make _ioloop_for_asyncio a
# WeakKeyDictionary.
for loop in list(IOLoop._ioloop_for_asyncio):
if loop.is_closed():
del IOLoop._ioloop_for_asyncio[loop]
IOLoop._ioloop_for_asyncio[asyncio_loop] = self
super(BaseAsyncIOLoop, self).initialize(**kwargs)
def close(self, all_fds=False):
self.closing = True
@ -59,8 +62,8 @@ class BaseAsyncIOLoop(IOLoop):
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
if self.close_loop:
self.asyncio_loop.close()
self.asyncio_loop.close()
del IOLoop._ioloop_for_asyncio[self.asyncio_loop]
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
@ -114,16 +117,16 @@ class BaseAsyncIOLoop(IOLoop):
handler_func(fileobj, events)
def start(self):
old_current = IOLoop.current(instance=False)
try:
old_loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
old_loop = None
try:
self._setup_logging()
self.make_current()
asyncio.set_event_loop(self.asyncio_loop)
self.asyncio_loop.run_forever()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
asyncio.set_event_loop(old_loop)
def stop(self):
self.asyncio_loop.stop()
@ -140,67 +143,110 @@ class BaseAsyncIOLoop(IOLoop):
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
if self.closing:
# TODO: this is racy; we need a lock to ensure that the
# loop isn't closed during call_soon_threadsafe.
raise RuntimeError("IOLoop is closing")
self.asyncio_loop.call_soon_threadsafe(
self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
try:
self.asyncio_loop.call_soon_threadsafe(
self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
except RuntimeError:
# "Event loop is closed". Swallow the exception for
# consistency with PollIOLoop (and logical consistency
# with the fact that we can't guarantee that an
# add_callback that completes without error will
# eventually execute).
pass
add_callback_from_signal = add_callback
def run_in_executor(self, executor, func, *args):
return self.asyncio_loop.run_in_executor(executor, func, *args)
def set_default_executor(self, executor):
return self.asyncio_loop.set_default_executor(executor)
class AsyncIOMainLoop(BaseAsyncIOLoop):
"""``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
current ``asyncio`` event loop (i.e. the one returned by
``asyncio.get_event_loop()``). Recommended usage::
``asyncio.get_event_loop()``).
from tornado.platform.asyncio import AsyncIOMainLoop
import asyncio
AsyncIOMainLoop().install()
asyncio.get_event_loop().run_forever()
.. deprecated:: 5.0
See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
installing alternative IOLoops.
Now used automatically when appropriate; it is no longer necessary
to refer to this class directly.
.. versionchanged:: 5.0
Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop.
"""
def initialize(self, **kwargs):
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
close_loop=False, **kwargs)
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), **kwargs)
def make_current(self):
# AsyncIOMainLoop already refers to the current asyncio loop so
# nothing to do here.
pass
class AsyncIOLoop(BaseAsyncIOLoop):
"""``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
This class follows the usual Tornado semantics for creating new
``IOLoops``; these loops are not necessarily related to the
``asyncio`` default event loop. Recommended usage::
from tornado.ioloop import IOLoop
IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
IOLoop.current().start()
``asyncio`` default event loop.
Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
can be accessed with the ``asyncio_loop`` attribute.
.. versionchanged:: 5.0
When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets
the current `asyncio` event loop.
.. deprecated:: 5.0
Now used automatically when appropriate; it is no longer necessary
to refer to this class directly.
"""
def initialize(self, **kwargs):
self.is_current = False
loop = asyncio.new_event_loop()
try:
super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs)
super(AsyncIOLoop, self).initialize(loop, **kwargs)
except Exception:
# If initialize() does not succeed (taking ownership of the loop),
# we have to close it.
loop.close()
raise
def close(self, all_fds=False):
if self.is_current:
self.clear_current()
super(AsyncIOLoop, self).close(all_fds=all_fds)
def make_current(self):
if not self.is_current:
try:
self.old_asyncio = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
self.old_asyncio = None
self.is_current = True
asyncio.set_event_loop(self.asyncio_loop)
def _clear_current_hook(self):
if self.is_current:
asyncio.set_event_loop(self.old_asyncio)
self.is_current = False
def to_tornado_future(asyncio_future):
"""Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
.. versionadded:: 4.1
.. deprecated:: 5.0
Tornado ``Futures`` have been merged with `asyncio.Future`,
so this method is now a no-op.
"""
tf = tornado.concurrent.Future()
tornado.concurrent.chain_future(asyncio_future, tf)
return tf
return asyncio_future
def to_asyncio_future(tornado_future):
@ -211,12 +257,38 @@ def to_asyncio_future(tornado_future):
.. versionchanged:: 4.3
Now accepts any yieldable object, not just
`tornado.concurrent.Future`.
.. deprecated:: 5.0
Tornado ``Futures`` have been merged with `asyncio.Future`,
so this method is now equivalent to `tornado.gen.convert_yielded`.
"""
tornado_future = convert_yielded(tornado_future)
af = asyncio.Future()
tornado.concurrent.chain_future(tornado_future, af)
return af
return convert_yielded(tornado_future)
if hasattr(convert_yielded, 'register'):
convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore
class AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
"""Event loop policy that allows loop creation on any thread.
The default `asyncio` event loop policy only automatically creates
event loops in the main thread. Other threads must create event
loops explicitly or `asyncio.get_event_loop` (and therefore
`.IOLoop.current`) will fail. Installing this policy allows event
loops to be created automatically on any thread, matching the
behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
Usage::
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
.. versionadded:: 5.0
"""
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
# This was an AssertionError in python 3.4.2 (which ships with debian jessie)
# and changed to a RuntimeError in 3.4.3.
# "There is no current event loop in thread %r"
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
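# Sketch: with the policy installed, IOLoop.current() also works on a
# non-main thread (names are illustrative):
import threading


def _demo_any_thread_policy():
    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

    def worker():
        IOLoop.current()  # would raise on a bare thread without the policy

    t = threading.Thread(target=worker)
    t.start()
    t.join()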

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#

View file

@ -19,11 +19,11 @@ class CaresResolver(Resolver):
the default for ``tornado.simple_httpclient``, but other libraries
may default to ``AF_UNSPEC``.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
def initialize(self):
self.io_loop = IOLoop.current()
self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
self.fds = {}

View file

@ -32,10 +32,12 @@ class Waker(interface.Waker):
and Jython.
"""
def __init__(self):
from .auto import set_close_exec
# Based on Zope select_trigger.py:
# https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py
self.writer = socket.socket()
set_close_exec(self.writer.fileno())
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
@ -54,6 +56,7 @@ class Waker(interface.Waker):
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
set_close_exec(a.fileno())
a.bind(("127.0.0.1", 0))
a.listen(1)
connect_address = a.getsockname() # assigned (host, port) pair
@ -78,6 +81,7 @@ class Waker(interface.Waker):
a.close()
self.reader, addr = a.accept()
set_close_exec(self.reader.fileno())
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#

View file

@ -32,7 +32,7 @@ import sys
import twisted.internet.abstract # type: ignore
from twisted.internet.defer import Deferred # type: ignore
from twisted.internet.posixbase import PosixReactorBase # type: ignore
from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore
from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore # noqa: E501
from twisted.python import failure, log # type: ignore
from twisted.internet import error # type: ignore
import twisted.names.cache # type: ignore
@ -42,7 +42,7 @@ import twisted.names.resolve # type: ignore
from zope.interface import implementer # type: ignore
from tornado.concurrent import Future
from tornado.concurrent import Future, future_set_exc_info
from tornado.escape import utf8
from tornado import gen
import tornado.ioloop
@ -112,7 +112,7 @@ class TornadoReactor(PosixReactorBase):
instead of ``reactor.run()``.
It is also possible to create a non-global reactor by calling
``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
``tornado.platform.twisted.TornadoReactor()``. However, if
the `.IOLoop` and reactor are to be short-lived (such as those used in
unit tests), additional cleanup may be required. Specifically, it is
recommended to call::
@ -122,13 +122,11 @@ class TornadoReactor(PosixReactorBase):
before closing the `.IOLoop`.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, io_loop=None):
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
self._io_loop = io_loop
def __init__(self):
self._io_loop = tornado.ioloop.IOLoop.current()
self._readers = {} # map of reader objects to fd
self._writers = {} # map of writer objects to fd
self._fds = {} # a map of fd to a (reader, writer) tuple
@ -319,7 +317,10 @@ class _TestReactor(TornadoReactor):
"""
def __init__(self):
# always use a new ioloop
super(_TestReactor, self).__init__(IOLoop())
IOLoop.clear_current()
IOLoop(make_current=True)
super(_TestReactor, self).__init__()
IOLoop.clear_current()
def listenTCP(self, port, factory, backlog=50, interface=''):
# default to localhost to avoid firewall prompts on the mac
@ -335,7 +336,7 @@ class _TestReactor(TornadoReactor):
port, protocol, interface=interface, maxPacketSize=maxPacketSize)
def install(io_loop=None):
def install():
"""Install this package as the default Twisted reactor.
``install()`` must be called very early in the startup process,
@ -346,13 +347,11 @@ def install(io_loop=None):
in multi-process mode, and an external process manager such as
``supervisord`` is recommended instead.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
reactor = TornadoReactor(io_loop)
reactor = TornadoReactor()
from twisted.internet.main import installReactor # type: ignore
installReactor(reactor)
return reactor
@ -384,6 +383,8 @@ class _FD(object):
self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
self.lost = True
writeConnectionLost = readConnectionLost = connectionLost
def logPrefix(self):
return ''
@ -519,21 +520,20 @@ class TwistedResolver(Resolver):
recommended only when threads cannot be used, since it has
limitations compared to the standard ``getaddrinfo``-based
`~tornado.netutil.Resolver` and
`~tornado.netutil.ThreadedResolver`. Specifically, it returns at
`~tornado.netutil.DefaultExecutorResolver`. Specifically, it returns at
most one result, and arguments other than ``host`` and ``family``
are ignored. It may fail to resolve when ``family`` is not
``socket.AF_UNSPEC``.
Requires Twisted 12.1 or newer.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
def initialize(self):
# partial copy of twisted.names.client.createResolver, which doesn't
# allow for a reactor to be passed in.
self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
self.reactor = tornado.platform.twisted.TornadoReactor()
host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
@ -554,7 +554,9 @@ class TwistedResolver(Resolver):
resolved_family = socket.AF_INET6
else:
deferred = self.resolver.getHostByName(utf8(host))
resolved = yield gen.Task(deferred.addBoth)
fut = Future()
deferred.addBoth(fut.set_result)
resolved = yield fut
if isinstance(resolved, failure.Failure):
try:
resolved.raiseException()
@ -586,6 +588,6 @@ if hasattr(gen.convert_yielded, 'register'):
# Should never happen, but just in case
raise Exception("errback called without error")
except:
f.set_exc_info(sys.exc_info())
future_set_exc_info(f, sys.exc_info())
d.addCallbacks(f.set_result, errback)
return f

View file

@ -8,7 +8,7 @@ import ctypes.wintypes # type: ignore
# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) # noqa: E501
SetHandleInformation.restype = ctypes.wintypes.BOOL
HANDLE_FLAG_INHERIT = 0x00000001

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
@ -29,7 +28,7 @@ import time
from binascii import hexlify
from tornado.concurrent import Future
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado import ioloop
from tornado.iostream import PipeIOStream
from tornado.log import gen_log
@ -126,10 +125,6 @@ def fork_processes(num_processes, max_restarts=100):
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = cpu_count()
if ioloop.IOLoop.initialized():
raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
"has already been initialized. You cannot call "
"IOLoop.instance() before calling start_processes()")
gen_log.info("Starting %d processes", num_processes)
children = {}
@ -199,16 +194,17 @@ class Subprocess(object):
* ``stdin``, ``stdout``, and ``stderr`` may have the value
``tornado.process.Subprocess.STREAM``, which will make the corresponding
attribute of the resulting Subprocess a `.PipeIOStream`.
* A new keyword argument ``io_loop`` may be used to pass in an IOLoop.
attribute of the resulting Subprocess a `.PipeIOStream`. If this option
is used, the caller is responsible for closing the streams when done
with them.
The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
``wait_for_exit`` methods do not work on Windows. There is
therefore no reason to use this class instead of
``subprocess.Popen`` on that platform.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
STREAM = object()
@ -217,7 +213,7 @@ class Subprocess(object):
_waiting = {} # type: ignore
def __init__(self, *args, **kwargs):
self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
self.io_loop = ioloop.IOLoop.current()
# All FDs we create should be closed on error; those in to_close
# should be closed in the parent process on success.
pipe_fds = []
@ -227,19 +223,19 @@ class Subprocess(object):
kwargs['stdin'] = in_r
pipe_fds.extend((in_r, in_w))
to_close.append(in_r)
self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
self.stdin = PipeIOStream(in_w)
if kwargs.get('stdout') is Subprocess.STREAM:
out_r, out_w = _pipe_cloexec()
kwargs['stdout'] = out_w
pipe_fds.extend((out_r, out_w))
to_close.append(out_w)
self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
self.stdout = PipeIOStream(out_r)
if kwargs.get('stderr') is Subprocess.STREAM:
err_r, err_w = _pipe_cloexec()
kwargs['stderr'] = err_w
pipe_fds.extend((err_r, err_w))
to_close.append(err_w)
self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
self.stderr = PipeIOStream(err_r)
try:
self.proc = subprocess.Popen(*args, **kwargs)
except:
@ -270,7 +266,7 @@ class Subprocess(object):
signal handler is causing a problem.
"""
self._exit_callback = stack_context.wrap(callback)
Subprocess.initialize(self.io_loop)
Subprocess.initialize()
Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid)
@ -297,12 +293,12 @@ class Subprocess(object):
# Unfortunately we don't have the original args any more.
future.set_exception(CalledProcessError(ret, None))
else:
future.set_result(ret)
future_set_result_unless_cancelled(future, ret)
self.set_exit_callback(callback)
return future
@classmethod
def initialize(cls, io_loop=None):
def initialize(cls):
"""Initializes the ``SIGCHLD`` handler.
The signal handler is run on an `.IOLoop` to avoid locking issues.
@ -310,13 +306,13 @@ class Subprocess(object):
same one used by individual Subprocess objects (as long as the
``IOLoops`` are each running in separate threads).
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been
removed.
"""
if cls._initialized:
return
if io_loop is None:
io_loop = ioloop.IOLoop.current()
io_loop = ioloop.IOLoop.current()
cls._old_sigchld = signal.signal(
signal.SIGCHLD,
lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
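# Hedged usage sketch of Subprocess.STREAM; the command is illustrative
# and the caller owns (and here drains) the pipe:
from tornado import gen


@gen.coroutine
def _demo_capture_stdout():
    proc = Subprocess(['echo', 'hello'], stdout=Subprocess.STREAM)
    output = yield proc.stdout.read_until_close()
    yield proc.wait_for_exit()
    raise gen.Return(output)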

View file

@ -12,7 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Asynchronous queues for coroutines.
"""Asynchronous queues for coroutines. These classes are very similar
to those provided in the standard library's `asyncio package
<https://docs.python.org/3/library/asyncio-queue.html>`_.
.. warning::
@ -20,6 +22,7 @@
are *not* thread-safe. To use these queues from another thread,
use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread
before calling any queue methods.
"""
from __future__ import absolute_import, division, print_function
@ -28,7 +31,7 @@ import collections
import heapq
from tornado import gen, ioloop
from tornado.concurrent import Future
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.locks import Event
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
@ -47,7 +50,8 @@ class QueueFull(Exception):
def _set_timeout(future, timeout):
if timeout:
def on_timeout():
future.set_exception(gen.TimeoutError())
if not future.done():
future.set_exception(gen.TimeoutError())
io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
future.add_done_callback(
@ -166,18 +170,23 @@ class Queue(object):
def put(self, item, timeout=None):
"""Put an item into the queue, perhaps waiting until there is room.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout.
``timeout`` may be a number denoting a time (on the same
scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
"""
future = Future()
try:
self.put_nowait(item)
except QueueFull:
future = Future()
self._putters.append((item, future))
_set_timeout(future, timeout)
return future
else:
return gen._null_future
future.set_result(None)
return future
def put_nowait(self, item):
"""Put an item into the queue without blocking.
@ -189,7 +198,7 @@ class Queue(object):
assert self.empty(), "queue non-empty, why are getters waiting?"
getter = self._getters.popleft()
self.__put_internal(item)
getter.set_result(self._get())
future_set_result_unless_cancelled(getter, self._get())
elif self.full():
raise QueueFull
else:
@ -199,7 +208,12 @@ class Queue(object):
"""Remove and return an item from the queue.
Returns a Future which resolves once an item is available, or raises
`tornado.gen.TimeoutError` after a timeout.
`tornado.util.TimeoutError` after a timeout.
``timeout`` may be a number denoting a time (on the same
scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
"""
future = Future()
try:
@ -220,7 +234,7 @@ class Queue(object):
assert self.full(), "queue not full, why are putters waiting?"
item, putter = self._putters.popleft()
self.__put_internal(item)
putter.set_result(None)
future_set_result_unless_cancelled(putter, None)
return self._get()
elif self.qsize():
return self._get()
@ -248,12 +262,11 @@ class Queue(object):
def join(self, timeout=None):
"""Block until all items in the queue are processed.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout.
"""
return self._finished.wait(timeout)
@gen.coroutine
def __aiter__(self):
return _QueueIterator(self)
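# A minimal consumer sketch for the Queue above; assumes a running IOLoop:
@gen.coroutine
def _demo_consumer(q):
    while True:
        item = yield q.get()
        try:
            print('processing', item)
        finally:
            q.task_done()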

View file

@ -242,6 +242,11 @@ class _RoutingDelegate(httputil.HTTPMessageDelegate):
start_line=start_line, headers=headers)
self.delegate = self.router.find_handler(request)
if self.delegate is None:
app_log.debug("Delegate for %s %s request not found",
start_line.method, start_line.path)
self.delegate = _DefaultMessageDelegate(self.request_conn)
return self.delegate.headers_received(start_line, headers)
def data_received(self, chunk):
@ -254,6 +259,16 @@ class _RoutingDelegate(httputil.HTTPMessageDelegate):
self.delegate.on_connection_close()
class _DefaultMessageDelegate(httputil.HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def finish(self):
self.connection.write_headers(
httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"), httputil.HTTPHeaders())
self.connection.finish()
class RuleRouter(Router):
"""Rule-based router implementation."""
@ -278,7 +293,8 @@ class RuleRouter(Router):
])
In the examples above, ``Target`` can be a nested `Router` instance, an instance of
`~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument.
`~.httputil.HTTPServerConnectionDelegate` or an old-style callable,
accepting a request argument.
:arg rules: a list of `Rule` instances or tuples of `Rule`
constructor arguments.
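A hedged illustration of the routing behavior described above (handler name and port are invented): requests that match no rule are now answered with the plain 404 produced by ``_DefaultMessageDelegate`` instead of having no delegate at all.

    from tornado import httpserver, ioloop, routing, web

    class ApiHandler(web.RequestHandler):
        def get(self):
            self.write('api ok')

    router = routing.RuleRouter([
        routing.Rule(routing.PathMatches('/api/.*'),
                     web.Application([(r'/api/v1', ApiHandler)])),
    ])

    server = httpserver.HTTPServer(router)
    server.listen(8888)
    ioloop.IOLoop.current().start()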
@ -567,7 +583,7 @@ class PathMatches(Matcher):
else:
try:
unescaped_fragment = re_unescape(fragment)
except ValueError as exc:
except ValueError:
# If we can't unescape part of it, we can't
# reverse this url.
return (None, None)
@ -589,7 +605,7 @@ class URLSpec(Rule):
* ``pattern``: Regular expression to be matched. Any capturing
groups in the regex will be passed in to the handler's
get/post/etc methods as arguments (by keyword if named, by
position if unnamed. Named and unnamed capturing groups may
position if unnamed. Named and unnamed capturing groups
may not be mixed in the same rule).
* ``handler``: `~.web.RequestHandler` subclass to be invoked.
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from tornado.escape import utf8, _unicode
@ -6,6 +5,7 @@ from tornado import gen
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado import httputil
from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults
from tornado.log import gen_log
@ -34,18 +34,6 @@ except ImportError:
# ssl is not available on Google App Engine.
ssl = None
try:
import certifi
except ImportError:
certifi = None
def _default_ca_certs():
if certifi is None:
raise Exception("The 'certifi' package is required to use https "
"in simple_httpclient")
return certifi.where()
class SimpleAsyncHTTPClient(AsyncHTTPClient):
"""Non-blocking HTTP client with no external dependencies.
@ -56,7 +44,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
are not reused, and callers cannot select the network interface to be
used.
"""
def initialize(self, io_loop, max_clients=10,
def initialize(self, max_clients=10,
hostname_mapping=None, max_buffer_size=104857600,
resolver=None, defaults=None, max_header_size=None,
max_body_size=None):
@ -92,8 +80,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
.. versionchanged:: 4.2
Added the ``max_body_size`` argument.
"""
super(SimpleAsyncHTTPClient, self).initialize(io_loop,
defaults=defaults)
super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults)
self.max_clients = max_clients
self.queue = collections.deque()
self.active = {}
@ -107,12 +94,12 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
self.resolver = resolver
self.own_resolver = False
else:
self.resolver = Resolver(io_loop=io_loop)
self.resolver = Resolver()
self.own_resolver = True
if hostname_mapping is not None:
self.resolver = OverrideResolver(resolver=self.resolver,
mapping=hostname_mapping)
self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
self.tcp_client = TCPClient(resolver=self.resolver)
def close(self):
super(SimpleAsyncHTTPClient, self).close()
@ -153,7 +140,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
def _handle_request(self, request, release_callback, final_callback):
self._connection_class()(
self.io_loop, self, request, release_callback,
self, request, release_callback,
final_callback, self.max_buffer_size, self.tcp_client,
self.max_header_size, self.max_body_size)
@ -190,11 +177,11 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
class _HTTPConnection(httputil.HTTPMessageDelegate):
_SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])
def __init__(self, io_loop, client, request, release_callback,
def __init__(self, client, request, release_callback,
final_callback, max_buffer_size, tcp_client,
max_header_size, max_body_size):
self.start_time = io_loop.time()
self.io_loop = io_loop
self.io_loop = IOLoop.current()
self.start_time = self.io_loop.time()
self.client = client
self.request = request
self.release_callback = release_callback
@ -240,10 +227,10 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
self._timeout = self.io_loop.add_timeout(
self.start_time + timeout,
stack_context.wrap(functools.partial(self._on_timeout, "while connecting")))
self.tcp_client.connect(host, port, af=af,
ssl_options=ssl_options,
max_buffer_size=self.max_buffer_size,
callback=self._on_connect)
fut = self.tcp_client.connect(host, port, af=af,
ssl_options=ssl_options,
max_buffer_size=self.max_buffer_size)
fut.add_done_callback(stack_context.wrap(self._on_connect))
def _get_ssl_options(self, scheme):
if scheme == "https":
@ -256,42 +243,19 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
self.request.client_cert is None and
self.request.client_key is None):
return _client_ssl_defaults
ssl_options = {}
if self.request.validate_cert:
ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
if self.request.ca_certs is not None:
ssl_options["ca_certs"] = self.request.ca_certs
elif not hasattr(ssl, 'create_default_context'):
# When create_default_context is present,
# we can omit the "ca_certs" parameter entirely,
# which avoids the dependency on "certifi" for py34.
ssl_options["ca_certs"] = _default_ca_certs()
if self.request.client_key is not None:
ssl_options["keyfile"] = self.request.client_key
ssl_ctx = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH,
cafile=self.request.ca_certs)
if not self.request.validate_cert:
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
if self.request.client_cert is not None:
ssl_options["certfile"] = self.request.client_cert
# SSL interoperability is tricky. We want to disable
# SSLv2 for security reasons; it wasn't disabled by default
# until openssl 1.0. The best way to do this is to use
# the SSL_OP_NO_SSLv2, but that wasn't exposed to python
# until 3.2. Python 2.7 adds the ciphers argument, which
# can also be used to disable SSLv2. As a last resort
# on python 2.6, we set ssl_version to TLSv1. This is
# more narrow than we'd like since it also breaks
# compatibility with servers configured for SSLv3 only,
# but nearly all servers support both SSLv3 and TLSv1:
# http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
if sys.version_info >= (2, 7):
# In addition to disabling SSLv2, we also exclude certain
# classes of insecure ciphers.
ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES"
else:
# This is really only necessary for pre-1.0 versions
# of openssl, but python 2.6 doesn't expose version
# information.
ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1
return ssl_options
ssl_ctx.load_cert_chain(self.request.client_cert,
self.request.client_key)
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# See netutil.ssl_options_to_context
ssl_ctx.options |= ssl.OP_NO_COMPRESSION
return ssl_ctx
return None
def _on_timeout(self, info=None):
@ -311,7 +275,8 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def _on_connect(self, stream):
def _on_connect(self, stream_fut):
stream = stream_fut.result()
if self.final_callback is None:
# final_callback is cleared if we've hit our timeout.
stream.close()
View file
@ -1,9 +1,12 @@
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <stdint.h>
static PyObject* websocket_mask(PyObject* self, PyObject* args) {
const char* mask;
Py_ssize_t mask_len;
uint32_t uint32_mask;
uint64_t uint64_mask;
const char* data;
Py_ssize_t data_len;
Py_ssize_t i;
@ -14,13 +17,35 @@ static PyObject* websocket_mask(PyObject* self, PyObject* args) {
return NULL;
}
uint32_mask = ((uint32_t*)mask)[0];
result = PyBytes_FromStringAndSize(NULL, data_len);
if (!result) {
return NULL;
}
buf = PyBytes_AsString(result);
if (sizeof(size_t) >= 8) {
uint64_mask = uint32_mask;
uint64_mask = (uint64_mask << 32) | uint32_mask;
while (data_len >= 8) {
((uint64_t*)buf)[0] = ((uint64_t*)data)[0] ^ uint64_mask;
data += 8;
buf += 8;
data_len -= 8;
}
}
while (data_len >= 4) {
((uint32_t*)buf)[0] = ((uint32_t*)data)[0] ^ uint32_mask;
data += 4;
buf += 4;
data_len -= 4;
}
for (i = 0; i < data_len; i++) {
buf[i] = data[i] ^ mask[i % 4];
buf[i] = data[i] ^ mask[i];
}
return result;
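For reference, a pure-Python sketch of the operation this C extension accelerates (RFC 6455, section 5.3: XOR each payload byte with the repeating 4-byte mask); the 64-bit and 32-bit loops above are only speed-ups of the same byte-wise XOR:

    def websocket_mask_py(mask, data):
        # mask is 4 bytes; data is the frame payload.
        m = bytearray(mask)
        buf = bytearray(data)
        for i in range(len(buf)):
            buf[i] ^= m[i % 4]
        return bytes(buf)

    # Masking is its own inverse:
    masked = websocket_mask_py(b'\x01\x02\x03\x04', b'hello world')
    assert websocket_mask_py(b'\x01\x02\x03\x04', masked) == b'hello world'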
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2014 Facebook
#
@ -20,12 +19,17 @@ from __future__ import absolute_import, division, print_function
import functools
import socket
import numbers
import datetime
from tornado.concurrent import Future
from tornado.concurrent import Future, future_add_done_callback
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado import gen
from tornado.netutil import Resolver
from tornado.platform.auto import set_close_exec
from tornado.gen import TimeoutError
from tornado.util import timedelta_to_seconds
_INITIAL_CONNECT_TIMEOUT = 0.3
@ -47,15 +51,17 @@ class _Connector(object):
http://tools.ietf.org/html/rfc6555
"""
def __init__(self, addrinfo, io_loop, connect):
self.io_loop = io_loop
def __init__(self, addrinfo, connect):
self.io_loop = IOLoop.current()
self.connect = connect
self.future = Future()
self.timeout = None
self.connect_timeout = None
self.last_error = None
self.remaining = len(addrinfo)
self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
self.streams = set()
@staticmethod
def split(addrinfo):
@ -77,9 +83,11 @@ class _Connector(object):
secondary.append((af, addr))
return primary, secondary
def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
def start(self, timeout=_INITIAL_CONNECT_TIMEOUT, connect_timeout=None):
self.try_connect(iter(self.primary_addrs))
self.set_timout(timeout)
self.set_timeout(timeout)
if connect_timeout is not None:
self.set_connect_timeout(connect_timeout)
return self.future
def try_connect(self, addrs):
@ -93,9 +101,10 @@ class _Connector(object):
self.future.set_exception(self.last_error or
IOError("connection failed"))
return
future = self.connect(af, addr)
future.add_done_callback(functools.partial(self.on_connect_done,
addrs, af, addr))
stream, future = self.connect(af, addr)
self.streams.add(stream)
future_add_done_callback(
future, functools.partial(self.on_connect_done, addrs, af, addr))
def on_connect_done(self, addrs, af, addr, future):
self.remaining -= 1
@ -114,39 +123,60 @@ class _Connector(object):
self.io_loop.remove_timeout(self.timeout)
self.on_timeout()
return
self.clear_timeout()
self.clear_timeouts()
if self.future.done():
# This is a late arrival; just drop it.
stream.close()
else:
self.streams.discard(stream)
self.future.set_result((af, addr, stream))
self.close_streams()
def set_timout(self, timeout):
def set_timeout(self, timeout):
self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
self.on_timeout)
def on_timeout(self):
self.timeout = None
self.try_connect(iter(self.secondary_addrs))
if not self.future.done():
self.try_connect(iter(self.secondary_addrs))
def clear_timeout(self):
if self.timeout is not None:
self.io_loop.remove_timeout(self.timeout)
def set_connect_timeout(self, connect_timeout):
self.connect_timeout = self.io_loop.add_timeout(
connect_timeout, self.on_connect_timeout)
def on_connect_timeout(self):
if not self.future.done():
self.future.set_exception(TimeoutError())
self.close_streams()
def clear_timeouts(self):
if self.timeout is not None:
self.io_loop.remove_timeout(self.timeout)
if self.connect_timeout is not None:
self.io_loop.remove_timeout(self.connect_timeout)
def close_streams(self):
for stream in self.streams:
stream.close()
class TCPClient(object):
"""A non-blocking TCP connection factory.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, resolver=None, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
def __init__(self, resolver=None):
if resolver is not None:
self.resolver = resolver
self._own_resolver = False
else:
self.resolver = Resolver(io_loop=io_loop)
self.resolver = Resolver()
self._own_resolver = True
def close(self):
@ -155,7 +185,8 @@ class TCPClient(object):
@gen.coroutine
def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
max_buffer_size=None, source_ip=None, source_port=None):
max_buffer_size=None, source_ip=None, source_port=None,
timeout=None):
"""Connect to the given host and port.
Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
@ -167,25 +198,48 @@ class TCPClient(object):
use a specific interface, it has to be handled outside
of Tornado as this depends very much on the platform.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
Similarly, when the user requires a certain source port, it can
be specified using the ``source_port`` arg.
.. versionchanged:: 4.5
Added the ``source_ip`` and ``source_port`` arguments.
.. versionchanged:: 5.0
Added the ``timeout`` argument.
"""
addrinfo = yield self.resolver.resolve(host, port, af)
if timeout is not None:
if isinstance(timeout, numbers.Real):
timeout = IOLoop.current().time() + timeout
elif isinstance(timeout, datetime.timedelta):
timeout = IOLoop.current().time() + timedelta_to_seconds(timeout)
else:
raise TypeError("Unsupported timeout %r" % timeout)
if timeout is not None:
addrinfo = yield gen.with_timeout(
timeout, self.resolver.resolve(host, port, af))
else:
addrinfo = yield self.resolver.resolve(host, port, af)
connector = _Connector(
addrinfo, self.io_loop,
addrinfo,
functools.partial(self._create_stream, max_buffer_size,
source_ip=source_ip, source_port=source_port)
)
af, addr, stream = yield connector.start()
af, addr, stream = yield connector.start(connect_timeout=timeout)
# TODO: For better performance we could cache the (af, addr)
# information here and re-use it on subsequent connections to
# the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
if ssl_options is not None:
stream = yield stream.start_tls(False, ssl_options=ssl_options,
server_hostname=host)
if timeout is not None:
stream = yield gen.with_timeout(timeout, stream.start_tls(
False, ssl_options=ssl_options, server_hostname=host))
else:
stream = yield stream.start_tls(False, ssl_options=ssl_options,
server_hostname=host)
raise gen.Return(stream)
def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
@ -202,6 +256,7 @@ class TCPClient(object):
# - 127.0.0.1 for IPv4
# - ::1 for IPv6
socket_obj = socket.socket(af)
set_close_exec(socket_obj.fileno())
if source_port_bind or source_ip_bind:
# If the user requires binding also to a specific IP/port.
try:
@ -212,11 +267,10 @@ class TCPClient(object):
raise
try:
stream = IOStream(socket_obj,
io_loop=self.io_loop,
max_buffer_size=max_buffer_size)
except socket.error as e:
fu = Future()
fu.set_exception(e)
return fu
else:
return stream.connect(addr)
return stream, stream.connect(addr)
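A hedged usage sketch of the new ``timeout`` argument to ``TCPClient.connect`` (host and request line are illustrative):

    from datetime import timedelta

    from tornado import gen, ioloop
    from tornado.tcpclient import TCPClient

    @gen.coroutine
    def fetch_banner():
        client = TCPClient()
        try:
            stream = yield client.connect('example.com', 80,
                                          timeout=timedelta(seconds=5))
        except gen.TimeoutError:
            print('connect timed out')
            return
        yield stream.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
        data = yield stream.read_until_close()
        print(data.split(b'\r\n')[0])

    ioloop.IOLoop.current().run_sync(fetch_banner)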
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
@ -102,12 +101,15 @@ class TCPServer(object):
.. versionadded:: 3.1
The ``max_buffer_size`` argument.
.. versionchanged:: 5.0
The ``io_loop`` argument has been removed.
"""
def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None,
def __init__(self, ssl_options=None, max_buffer_size=None,
read_chunk_size=None):
self.io_loop = io_loop
self.ssl_options = ssl_options
self._sockets = {} # fd -> socket object
self._sockets = {} # fd -> socket object
self._handlers = {} # fd -> remove_handler callable
self._pending_sockets = []
self._started = False
self._stopped = False
@ -151,13 +153,10 @@ class TCPServer(object):
method and `tornado.process.fork_processes` to provide greater
control over the initialization of a multi-process server.
"""
if self.io_loop is None:
self.io_loop = IOLoop.current()
for sock in sockets:
self._sockets[sock.fileno()] = sock
add_accept_handler(sock, self._handle_connection,
io_loop=self.io_loop)
self._handlers[sock.fileno()] = add_accept_handler(
sock, self._handle_connection)
def add_socket(self, socket):
"""Singular version of `add_sockets`. Takes a single socket object."""
@ -234,7 +233,8 @@ class TCPServer(object):
self._stopped = True
for fd, sock in self._sockets.items():
assert sock.fileno() == fd
self.io_loop.remove_handler(fd)
# Unregister socket from IOLoop
self._handlers.pop(fd)()
sock.close()
def handle_stream(self, stream, address):
@ -284,17 +284,17 @@ class TCPServer(object):
raise
try:
if self.ssl_options is not None:
stream = SSLIOStream(connection, io_loop=self.io_loop,
stream = SSLIOStream(connection,
max_buffer_size=self.max_buffer_size,
read_chunk_size=self.read_chunk_size)
else:
stream = IOStream(connection, io_loop=self.io_loop,
stream = IOStream(connection,
max_buffer_size=self.max_buffer_size,
read_chunk_size=self.read_chunk_size)
future = self.handle_stream(stream, address)
if future is not None:
self.io_loop.add_future(gen.convert_yielded(future),
lambda f: f.result())
IOLoop.current().add_future(gen.convert_yielded(future),
lambda f: f.result())
except Exception:
app_log.error("Error in connection callback", exc_info=True)
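A minimal echo-server sketch against the updated API (no ``io_loop`` arguments; a coroutine ``handle_stream`` is awaited via the ``add_future`` call above); the port is arbitrary:

    from tornado import gen, ioloop
    from tornado.iostream import StreamClosedError
    from tornado.tcpserver import TCPServer

    class EchoServer(TCPServer):
        @gen.coroutine
        def handle_stream(self, stream, address):
            try:
                while True:
                    line = yield stream.read_until(b'\n')
                    yield stream.write(line)
            except StreamClosedError:
                pass

    server = EchoServer()
    server.listen(7777)
    ioloop.IOLoop.current().start()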
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -260,9 +259,8 @@ class Template(object):
:arg str template_string: the contents of the template file.
:arg str name: the filename from which the template was loaded
(used for error message).
:arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template,
used to resolve ``{% include %}`` and ``{% extend %}``
directives.
:arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible
for this template, used to resolve ``{% include %}`` and ``{% extend %}`` directives.
:arg bool compress_whitespace: Deprecated since Tornado 4.3.
Equivalent to ``whitespace="single"`` if true and
``whitespace="all"`` if false.
View file
@ -1,10 +1,9 @@
#!/usr/bin/env python
"""Support classes for automated testing.
* `AsyncTestCase` and `AsyncHTTPTestCase`: Subclasses of unittest.TestCase
with additional support for testing asynchronous (`.IOLoop`-based) code.
* `ExpectLog` and `LogTrapTestCase`: Make test logs less spammy.
* `ExpectLog`: Make test logs less spammy.
* `main()`: A simple test runner (wrapper around unittest.main()) with support
for the tornado.autoreload module to rerun the tests when code changes.
@ -22,7 +21,7 @@ try:
from tornado.process import Subprocess
except ImportError:
# These modules are not importable on app engine. Parts of this module
# won't work, but e.g. LogTrapTestCase and main() will.
# won't work, but e.g. main() will.
AsyncHTTPClient = None # type: ignore
gen = None # type: ignore
HTTPServer = None # type: ignore
@ -30,7 +29,7 @@ except ImportError:
netutil = None # type: ignore
SimpleAsyncHTTPClient = None # type: ignore
Subprocess = None # type: ignore
from tornado.log import gen_log, app_log
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import raise_exc_info, basestring_type, PY3
import functools
@ -42,10 +41,11 @@ import signal
import socket
import sys
if PY3:
from io import StringIO
else:
from cStringIO import StringIO
try:
import asyncio
except ImportError:
asyncio = None
try:
from collections.abc import Generator as GeneratorType # type: ignore
@ -73,24 +73,12 @@ else:
except ImportError:
import unittest # type: ignore
_next_port = 10000
def get_unused_port():
"""Returns a (hopefully) unused port number.
This function does not guarantee that the port it returns is available,
only that a series of get_unused_port calls in a single process return
distinct ports.
.. deprecated::
Use bind_unused_port instead, which is guaranteed to find an unused port.
"""
global _next_port
port = _next_port
_next_port = _next_port + 1
return port
if asyncio is None:
_NON_OWNED_IOLOOPS = ()
else:
import tornado.platform.asyncio
_NON_OWNED_IOLOOPS = tornado.platform.asyncio.AsyncIOMainLoop
def bind_unused_port(reuse_port=False):
"""Binds a server socket to an available port on localhost.
@ -166,8 +154,7 @@ class AsyncTestCase(unittest.TestCase):
callbacks should call ``self.stop()`` to signal completion.
By default, a new `.IOLoop` is constructed for each test and is available
as ``self.io_loop``. This `.IOLoop` should be used in the construction of
HTTP clients/servers, etc. If the code being tested requires a
as ``self.io_loop``. If the code being tested requires a
global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.
The `.IOLoop`'s ``start`` and ``stop`` methods should not be
@ -182,7 +169,7 @@ class AsyncTestCase(unittest.TestCase):
class MyTestCase(AsyncTestCase):
@tornado.testing.gen_test
def test_http_fetch(self):
client = AsyncHTTPClient(self.io_loop)
client = AsyncHTTPClient()
response = yield client.fetch("http://www.tornadoweb.org")
# Test contents of response
self.assertIn("FriendFeed", response.body)
@ -190,7 +177,7 @@ class AsyncTestCase(unittest.TestCase):
# This test uses argument passing between self.stop and self.wait.
class MyTestCase2(AsyncTestCase):
def test_http_fetch(self):
client = AsyncHTTPClient(self.io_loop)
client = AsyncHTTPClient()
client.fetch("http://www.tornadoweb.org/", self.stop)
response = self.wait()
# Test contents of response
@ -199,7 +186,7 @@ class AsyncTestCase(unittest.TestCase):
# This test uses an explicit callback-based style.
class MyTestCase3(AsyncTestCase):
def test_http_fetch(self):
client = AsyncHTTPClient(self.io_loop)
client = AsyncHTTPClient()
client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
self.wait()
@ -235,8 +222,7 @@ class AsyncTestCase(unittest.TestCase):
# Clean up Subprocess, so it can be used again with a new ioloop.
Subprocess.uninitialize()
self.io_loop.clear_current()
if (not IOLoop.initialized() or
self.io_loop is not IOLoop.instance()):
if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS):
# Try to clean up any file descriptors left open in the ioloop.
# This avoids leaks, especially when tests are run repeatedly
# in the same process with autoreload (because curl does not
@ -250,9 +236,15 @@ class AsyncTestCase(unittest.TestCase):
self.__rethrow()
def get_new_ioloop(self):
"""Creates a new `.IOLoop` for this test. May be overridden in
subclasses for tests that require a specific `.IOLoop` (usually
the singleton `.IOLoop.instance()`).
"""Returns the `.IOLoop` to use for this test.
By default, a new `.IOLoop` is created for each test.
Subclasses may override this method to return
`.IOLoop.current()` if it is not appropriate to use a new
`.IOLoop` in each test (for example, if there are global
singletons using the default `.IOLoop`) or if a per-test event
loop is being provided by another system (such as
``pytest-asyncio``).
"""
return IOLoop()
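A sketch of the override described above, for suites whose code depends on the global loop (class name invented):

    from tornado import ioloop, testing

    class GlobalLoopTest(testing.AsyncTestCase):
        def get_new_ioloop(self):
            # Share the current loop instead of creating one per test.
            return ioloop.IOLoop.current()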
@ -321,7 +313,8 @@ class AsyncTestCase(unittest.TestCase):
except Exception:
self.__failure = sys.exc_info()
self.stop()
self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func)
self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
timeout_func)
while True:
self.__running = True
self.io_loop.start()
@ -382,11 +375,10 @@ class AsyncHTTPTestCase(AsyncTestCase):
self.http_server.add_sockets([sock])
def get_http_client(self):
return AsyncHTTPClient(io_loop=self.io_loop)
return AsyncHTTPClient()
def get_http_server(self):
return HTTPServer(self._app, io_loop=self.io_loop,
**self.get_httpserver_options())
return HTTPServer(self._app, **self.get_httpserver_options())
def get_app(self):
"""Should be overridden by subclasses to return a
@ -395,14 +387,23 @@ class AsyncHTTPTestCase(AsyncTestCase):
raise NotImplementedError()
def fetch(self, path, **kwargs):
"""Convenience method to synchronously fetch a url.
"""Convenience method to synchronously fetch a URL.
The given path will be appended to the local server's host and
port. Any additional kwargs will be passed directly to
`.AsyncHTTPClient.fetch` (and so could be used to pass
``method="POST"``, ``body="..."``, etc).
If the path begins with http:// or https://, it will be treated as a
full URL and will be fetched as-is.
.. versionchanged:: 5.0
Added support for absolute URLs.
"""
self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
if path.lower().startswith(('http://', 'https://')):
self.http_client.fetch(path, self.stop, **kwargs)
else:
self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
return self.wait()
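A self-contained sketch of both fetch styles (handler and test names are invented):

    from tornado import testing, web

    class HelloHandler(web.RequestHandler):
        def get(self):
            self.write('hello')

    class FetchTest(testing.AsyncHTTPTestCase):
        def get_app(self):
            return web.Application([(r'/', HelloHandler)])

        def test_fetch(self):
            # Relative path: joined with the test server's address.
            self.assertEqual(self.fetch('/').body, b'hello')
            # Absolute URL: fetched as-is (new in 5.0).
            self.assertEqual(self.fetch(self.get_url('/')).body, b'hello')

    if __name__ == '__main__':
        testing.main()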
def get_httpserver_options(self):
@ -423,16 +424,14 @@ class AsyncHTTPTestCase(AsyncTestCase):
def get_url(self, path):
"""Returns an absolute url for the given path on the test server."""
return '%s://localhost:%s%s' % (self.get_protocol(),
return '%s://127.0.0.1:%s%s' % (self.get_protocol(),
self.get_http_port(), path)
def tearDown(self):
self.http_server.stop()
self.io_loop.run_sync(self.http_server.close_all_connections,
timeout=get_async_test_timeout())
if (not IOLoop.initialized() or
self.http_client.io_loop is not IOLoop.instance()):
self.http_client.close()
self.http_client.close()
super(AsyncHTTPTestCase, self).tearDown()
@ -442,7 +441,7 @@ class AsyncHTTPSTestCase(AsyncHTTPTestCase):
Interface is generally the same as `AsyncHTTPTestCase`.
"""
def get_http_client(self):
return AsyncHTTPClient(io_loop=self.io_loop, force_instance=True,
return AsyncHTTPClient(force_instance=True,
defaults=dict(validate_cert=False))
def get_httpserver_options(self):
@ -454,7 +453,8 @@ class AsyncHTTPSTestCase(AsyncHTTPTestCase):
By default includes a self-signed testing certificate.
"""
# Testing keys were generated with:
# openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509
# openssl req -new -keyout tornado/test/test.key \
# -out tornado/test/test.crt -nodes -days 3650 -x509
module_dir = os.path.dirname(__file__)
return dict(
certfile=os.path.join(module_dir, 'test', 'test.crt'),
@ -476,7 +476,7 @@ def gen_test(func=None, timeout=None):
class MyTest(AsyncHTTPTestCase):
@gen_test
def test_something(self):
response = yield gen.Task(self.fetch('/'))
response = yield self.http_client.fetch(self.get_url('/'))
By default, ``@gen_test`` times out after 5 seconds. The timeout may be
overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
@ -485,7 +485,11 @@ def gen_test(func=None, timeout=None):
class MyTest(AsyncHTTPTestCase):
@gen_test(timeout=10)
def test_something_slow(self):
response = yield gen.Task(self.fetch('/'))
response = yield self.http_client.fetch(self.get_url('/'))
Note that ``@gen_test`` is incompatible with `AsyncTestCase.stop`,
`AsyncTestCase.wait`, and `AsyncHTTPTestCase.fetch`. Use ``yield
self.http_client.fetch(self.get_url())`` as shown above instead.
.. versionadded:: 3.1
The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
@ -494,6 +498,7 @@ def gen_test(func=None, timeout=None):
.. versionchanged:: 4.0
The wrapper now passes along ``*args, **kwargs`` so it can be used
on functions with arguments.
"""
if timeout is None:
timeout = get_async_test_timeout()
@ -529,12 +534,17 @@ def gen_test(func=None, timeout=None):
timeout=timeout)
except TimeoutError as e:
# run_sync raises an error with an unhelpful traceback.
# Throw it back into the generator or coroutine so the stack
# trace is replaced by the point where the test is stopped.
self._test_generator.throw(e)
# In case the test contains an overly broad except clause,
# we may get back here. In this case re-raise the original
# exception, which is better than nothing.
# If the underlying generator is still running, we can throw the
# exception back into it so the stack trace is replaced by the
# point where the test is stopped. The only reason the generator
# would not be running would be if it were cancelled, which means
# a native coroutine, so we can rely on the cr_running attribute.
if getattr(self._test_generator, 'cr_running', True):
self._test_generator.throw(e)
# In case the test contains an overly broad except
# clause, we may get back here.
# Coroutine was stopped or didn't raise a useful stack trace,
# so re-raise the original exception which is better than nothing.
raise
return post_coroutine
@ -554,49 +564,6 @@ def gen_test(func=None, timeout=None):
gen_test.__test__ = False # type: ignore
class LogTrapTestCase(unittest.TestCase):
"""A test case that captures and discards all logging output
if the test passes.
Some libraries can produce a lot of logging output even when
the test succeeds, so this class can be useful to minimize the noise.
Simply use it as a base class for your test case. It is safe to combine
with AsyncTestCase via multiple inheritance
(``class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):``)
This class assumes that only one log handler is configured and
that it is a `~logging.StreamHandler`. This is true for both
`logging.basicConfig` and the "pretty logging" configured by
`tornado.options`. It is not compatible with other log buffering
mechanisms, such as those provided by some test runners.
.. deprecated:: 4.1
Use the unittest module's ``--buffer`` option instead, or `.ExpectLog`.
"""
def run(self, result=None):
logger = logging.getLogger()
if not logger.handlers:
logging.basicConfig()
handler = logger.handlers[0]
if (len(logger.handlers) > 1 or
not isinstance(handler, logging.StreamHandler)):
# Logging has been configured in a way we don't recognize,
# so just leave it alone.
super(LogTrapTestCase, self).run(result)
return
old_stream = handler.stream
try:
handler.stream = StringIO()
gen_log.info("RUNNING TEST: " + str(self))
old_error_count = len(result.failures) + len(result.errors)
super(LogTrapTestCase, self).run(result)
new_error_count = len(result.failures) + len(result.errors)
if new_error_count != old_error_count:
old_stream.write(handler.stream.getvalue())
finally:
handler.stream = old_stream
class ExpectLog(logging.Filter):
"""Context manager to capture and suppress expected log output.
@ -684,6 +651,12 @@ def main(**kwargs):
to show many test details as they are run.
See http://docs.python.org/library/unittest.html#unittest.main
for full argument list.
.. versionchanged:: 5.0
This function produces no output of its own; only that produced
by the `unittest` module (previously it would add a PASS or FAIL
log message).
"""
from tornado.options import define, options, parse_command_line
@ -719,23 +692,16 @@ def main(**kwargs):
if __name__ == '__main__' and len(argv) == 1:
print("No tests specified", file=sys.stderr)
sys.exit(1)
try:
# In order to be able to run tests by their fully-qualified name
# on the command line without importing all tests here,
# module must be set to None. Python 3.2's unittest.main ignores
# defaultTest if no module is given (it tries to do its own
# test discovery, which is incompatible with auto2to3), so don't
# set module if we're not asking for a specific test.
if len(argv) > 1:
unittest.main(module=None, argv=argv, **kwargs)
else:
unittest.main(defaultTest="all", argv=argv, **kwargs)
except SystemExit as e:
if e.code == 0:
gen_log.info('PASS')
else:
gen_log.error('FAIL')
raise
# In order to be able to run tests by their fully-qualified name
# on the command line without importing all tests here,
# module must be set to None. Python 3.2's unittest.main ignores
# defaultTest if no module is given (it tries to do its own
# test discovery, which is incompatible with auto2to3), so don't
# set module if we're not asking for a specific test.
if len(argv) > 1:
unittest.main(module=None, argv=argv, **kwargs)
else:
unittest.main(defaultTest="all", argv=argv, **kwargs)
if __name__ == '__main__':
View file
@ -84,6 +84,16 @@ except ImportError:
is_finalizing = _get_emulated_is_finalizing()
class TimeoutError(Exception):
"""Exception raised by `.with_timeout` and `.IOLoop.run_sync`.
.. versionchanged:: 5.0
Unified ``tornado.gen.TimeoutError`` and
``tornado.ioloop.TimeoutError`` as ``tornado.util.TimeoutError``.
Both former names remain as aliases.
"""
class ObjectDict(_ObjectDictBase):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
@ -272,6 +282,12 @@ class Configurable(object):
Configurable subclasses must define the class methods
`configurable_base` and `configurable_default`, and use the instance
method `initialize` instead of ``__init__``.
.. versionchanged:: 5.0
It is now possible for configuration to be specified at
multiple levels of a class hierarchy.
"""
__impl_class = None # type: type
__impl_kwargs = None # type: Dict[str, Any]
@ -286,6 +302,9 @@ class Configurable(object):
else:
impl = cls
init_kwargs.update(kwargs)
if impl.configurable_base() is not base:
# The impl class is itself configurable, so recurse.
return impl(*args, **init_kwargs)
instance = super(Configurable, cls).__new__(impl)
# initialize vs __init__ chosen for compatibility with AsyncHTTPClient
# singleton magic. If we get rid of that we can switch to __init__
@ -343,7 +362,10 @@ class Configurable(object):
# type: () -> type
"""Returns the currently configured class."""
base = cls.configurable_base()
if cls.__impl_class is None:
# Manually mangle the private name to see whether this base
# has been configured (and not another base higher in the
# hierarchy).
if base.__dict__.get('_Configurable__impl_class') is None:
base.__impl_class = cls.configurable_default()
return base.__impl_class
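A hedged sketch of the `Configurable` machinery touched above (class names invented); the new recursion in ``__new__`` additionally allows ``impl`` classes that are themselves configurable bases:

    from tornado.util import Configurable

    class Transport(Configurable):
        @classmethod
        def configurable_base(cls):
            return Transport

        @classmethod
        def configurable_default(cls):
            return PlainTransport

    class PlainTransport(Transport):
        def initialize(self):
            self.kind = 'plain'

    class TlsTransport(Transport):
        def initialize(self):
            self.kind = 'tls'

    assert isinstance(Transport(), PlainTransport)   # the default impl
    Transport.configure(TlsTransport)
    assert isinstance(Transport(), TlsTransport)     # the configured impl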
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
@ -47,12 +46,14 @@ Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
request, or to limit your use of other threads to
`.IOLoop.run_in_executor` and ensure that your callbacks running in
the executor do not refer to Tornado objects.
"""
@ -80,7 +81,7 @@ import types
from inspect import isclass
from io import BytesIO
from tornado.concurrent import Future
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado import escape
from tornado import gen
from tornado import httputil
@ -245,7 +246,7 @@ class RequestHandler(object):
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
or use ``async def`` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
@ -309,20 +310,21 @@ class RequestHandler(object):
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
:arg int status_code: Response status code.
:arg str reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
`http.client.responses` or "Unknown".
.. versionchanged:: 5.0
No longer validates that the response code is in
`http.client.responses`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d" % status_code)
self._reason = httputil.responses.get(status_code, "Unknown")
def get_status(self):
"""Returns the status code for our response."""
@ -521,18 +523,28 @@ class RequestHandler(object):
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
"""Returns the value of the request cookie with the given name.
If the named cookie is not present, returns ``default``.
This method only returns cookies that were present in the request.
It does not see the outgoing cookies set by `set_cookie` in this
handler.
"""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
"""Sets an outgoing cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
Newly-set cookies are not immediately visible via `get_cookie`;
they are not present until the next request.
Additional keyword arguments are set on the cookies.Morsel
directly.
See https://docs.python.org/2/library/cookie.html#Cookie.Morsel
See https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
@ -574,6 +586,9 @@ class RequestHandler(object):
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
@ -585,6 +600,9 @@ class RequestHandler(object):
See `clear_cookie` for more information on the path and domain
parameters.
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
@ -609,6 +627,9 @@ class RequestHandler(object):
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
@ -648,6 +669,10 @@ class RequestHandler(object):
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
Similar to `get_cookie`, this method only returns cookies that
were present in the request. It does not see outgoing cookies set by
`set_secure_cookie` in this handler.
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
@ -709,7 +734,8 @@ class RequestHandler(object):
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
message += ". Lists not accepted for security reasons; see " + \
"http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
@ -974,7 +1000,8 @@ class RequestHandler(object):
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if self._status_code in (204, 304):
if (self._status_code in (204, 304) or
(self._status_code >= 100 and self._status_code < 200)):
assert not self._write_buffer, "Cannot send body with %s" % self._status_code
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
@ -1195,6 +1222,11 @@ class RequestHandler(object):
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
This property is of type `bytes`, but it contains only ASCII
characters. If a character string is required, there is no
need to base64-encode it; just decode the byte string as
UTF-8.
.. versionchanged:: 3.2.2
The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
@ -1491,7 +1523,7 @@ class RequestHandler(object):
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
self._prepared_future.set_result(None)
future_set_result_unless_cancelled(self._prepared_future, None)
if self._finished:
return
@ -1516,6 +1548,9 @@ class RequestHandler(object):
self._handle_request_exception(e)
except Exception:
app_log.error("Exception in exception handler", exc_info=True)
finally:
# Unset result to avoid circular references
result = None
if (self._prepared_future is not None and
not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it
@ -1561,11 +1596,7 @@ class RequestHandler(object):
# send a response.
return
if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
@ -1711,7 +1742,7 @@ def stream_request_body(cls):
See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
for example usage.
"""
""" # noqa: E501
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
cls._stream_request_body = True
@ -1859,6 +1890,17 @@ class Application(ReversibleRouter):
If there's no match for the current request's host, then ``default_host``
parameter value is matched against host regular expressions.
.. warning::
Applications that do not use TLS may be vulnerable to :ref:`DNS
rebinding <dnsrebinding>` attacks. This attack is especially
relevant to applications that only listen on ``127.0.0.1`` or
other private networks. Appropriate host patterns must be used
(instead of the default of ``r'.*'``) to prevent this risk. The
``default_host`` argument must not be used in applications that
may be vulnerable to DNS rebinding.
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
@ -1869,6 +1911,7 @@ class Application(ReversibleRouter):
.. versionchanged:: 4.5
Integration with the new `tornado.routing` module.
"""
def __init__(self, handlers=None, default_host=None, transforms=None,
**settings):
@ -2089,7 +2132,7 @@ class _HandlerDelegate(httputil.HTTPMessageDelegate):
def finish(self):
if self.stream_request_body:
self.request.body.set_result(None)
future_set_result_unless_cancelled(self.request.body, None)
else:
self.request.body = b''.join(self.chunks)
self.request._parse_body()
@ -2146,11 +2189,11 @@ class HTTPError(Exception):
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
:arg str log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
:arg str reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
determined automatically from ``status_code``, but can be used
to use a non-standard numeric code.
@ -2256,13 +2299,21 @@ class RedirectHandler(RequestHandler):
.. versionchanged:: 4.5
Added support for substitutions into the destination URL.
.. versionchanged:: 5.0
If any query arguments are present, they will be copied to the
destination URL.
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self, *args):
self.redirect(self._url.format(*args), permanent=self._permanent)
to_url = self._url.format(*args)
if self.request.query_arguments:
to_url = httputil.url_concat(
to_url, list(httputil.qs_to_qsl(self.request.query_arguments)))
self.redirect(to_url, permanent=self._permanent)
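For example (route names invented), a request for ``/old?a=1`` now redirects to ``/new?a=1``:

    from tornado import web

    app = web.Application([
        (r'/old', web.RedirectHandler, {'url': '/new'}),
    ])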
class StaticFileHandler(RequestHandler):
@ -2467,8 +2518,9 @@ class StaticFileHandler(RequestHandler):
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# If client sent If-None-Match, use it, ignore If-Modified-Since
if self.request.headers.get('If-None-Match'):
return self.check_etag_header()
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
@ -2786,7 +2838,7 @@ class OutputTransform(object):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
@ -2827,7 +2879,7 @@ class GZipContentEncoding(OutputTransform):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501
# TODO: can/should this type be inherited from the superclass?
if 'Vary' in headers:
headers['Vary'] += ', Accept-Encoding'
View file
@ -17,7 +17,6 @@ the protocol (known as "draft 76") and are not compatible with this module.
"""
from __future__ import absolute_import, division, print_function
# Author: Jacob Kristhammar, 2010
import base64
import collections
@ -28,7 +27,7 @@ import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str, to_unicode
from tornado import gen, httpclient, httputil
from tornado.ioloop import IOLoop, PeriodicCallback
@ -237,6 +236,7 @@ class WebSocketHandler(tornado.web.RequestHandler):
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
@ -244,6 +244,10 @@ class WebSocketHandler(tornado.web.RequestHandler):
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Consistently raises `WebSocketClosedError`. Previously could
sometimes raise `.StreamClosedError`.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
@ -308,8 +312,23 @@ class WebSocketHandler(tornado.web.RequestHandler):
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
def ping(self, data=b''):
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``websocket_ping_interval`` application
setting instead of sending pings manually.
.. versionchanged:: 5.1
The data argument is now optional.
"""
data = utf8(data)
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
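A hedged server-side sketch of the optional payload (handler name invented):

    from tornado import websocket

    class KeepaliveHandler(websocket.WebSocketHandler):
        def open(self):
            self.ping(b'keepalive')     # payload is optional, at most 125 bytes

        def on_pong(self, data):
            print('pong payload: %r' % data)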
@ -539,7 +558,8 @@ class _PerMessageDeflateCompressor(object):
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level)
return zlib.compressobj(self._compression_level,
zlib.DEFLATED, -self._max_wbits, self._mem_level)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
@ -616,6 +636,14 @@ class WebSocketProtocol13(WebSocketProtocol):
def accept_connection(self):
try:
self._handle_websocket_headers()
except ValueError:
self.handler.set_status(400)
log_msg = "Missing/Invalid WebSocket headers"
self.handler.finish(log_msg)
gen_log.debug(log_msg)
return
try:
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
@ -648,8 +676,7 @@ class WebSocketProtocol13(WebSocketProtocol):
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
subprotocols = [s.strip() for s in self.request.headers.get_list("Sec-WebSocket-Protocol")]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
@ -743,31 +770,35 @@ class WebSocketProtocol13(WebSocketProtocol):
**self._get_compressor_options(other_side, agreed_parameters, compression_options))
def _write_frame(self, fin, opcode, data, flags=0):
data_len = len(data)
if opcode & 0x8:
# All control frames MUST have a payload length of 125
# bytes or less and MUST NOT be fragmented.
if not fin:
raise ValueError("control frames may not be fragmented")
if data_len > 125:
raise ValueError("control frame payloads may not exceed 125 bytes")
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
if data_len < 126:
frame += struct.pack("B", data_len | mask_bit)
elif data_len <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, data_len)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
frame += struct.pack("!BQ", 127 | mask_bit, data_len)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
try:
return self.stream.write(frame)
except StreamClosedError:
self._abort()
return self.stream.write(frame)
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
@ -782,7 +813,23 @@ class WebSocketProtocol13(WebSocketProtocol):
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags)
# For historical reasons, write methods in Tornado operate in a semi-synchronous
# mode in which awaiting the Future they return is optional (but errors can
# still be raised). This requires us to go through an awkward dance here
# to transform the errors that may be returned while presenting the same
# semi-synchronous interface.
try:
fut = self._write_frame(True, opcode, message, flags=flags)
except StreamClosedError:
raise WebSocketClosedError()
@gen.coroutine
def wrapper():
try:
yield fut
except StreamClosedError:
raise WebSocketClosedError()
return wrapper()
def write_ping(self, data):
"""Send ping frame."""
@ -951,7 +998,10 @@ class WebSocketProtocol13(WebSocketProtocol):
self.close(self.handler.close_code)
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
try:
self._write_frame(True, 0xA, data)
except StreamClosedError:
self._abort()
self._run_callback(self.handler.on_ping, data)
elif opcode == 0xA:
# Pong
@ -972,7 +1022,10 @@ class WebSocketProtocol13(WebSocketProtocol):
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
try:
self._write_frame(True, 0x8, close_data)
except StreamClosedError:
self._abort()
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
@ -1037,11 +1090,11 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, on_message_callback=None,
def __init__(self, request, on_message_callback=None,
compression_options=None, ping_interval=None, ping_timeout=None,
max_message_size=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.connect_future = Future()
self.protocol = None
self.read_future = None
self.read_queue = collections.deque()
@ -1070,9 +1123,9 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
self.tcp_client = TCPClient()
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None):
@ -1129,11 +1182,19 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
future_set_result_unless_cancelled(self.connect_future, self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
return self.protocol.write_message(message, binary)
"""Sends a message to the WebSocket server.
If the stream is closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Exception raised on a closed stream changed from `.StreamClosedError`
to `WebSocketClosedError`.
"""
return self.protocol.write_message(message, binary=binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
@ -1147,9 +1208,9 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
ready.
"""
assert self.read_future is None
future = TracebackFuture()
future = Future()
if self.read_queue:
future.set_result(self.read_queue.popleft())
future_set_result_unless_cancelled(future, self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
@ -1160,11 +1221,30 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
if self._on_message_callback:
self._on_message_callback(message)
elif self.read_future is not None:
self.read_future.set_result(message)
future_set_result_unless_cancelled(self.read_future, message)
self.read_future = None
else:
self.read_queue.append(message)
def ping(self, data=b''):
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``ping_interval`` argument to
`websocket_connect` instead of sending pings manually.
.. versionadded:: 5.1
"""
data = utf8(data)
if self.protocol is None:
raise WebSocketClosedError()
self.protocol.write_ping(data)
def on_pong(self, data):
pass
@ -1176,7 +1256,7 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
def websocket_connect(url, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None,
ping_interval=None, ping_timeout=None,
max_message_size=None):
@ -1207,14 +1287,14 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
The ``io_loop`` argument is deprecated.
.. versionchanged:: 4.5
Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
arguments, which have the same meaning as in `WebSocketHandler`.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
@ -1225,12 +1305,12 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request,
conn = WebSocketClientConnection(request,
on_message_callback=on_message_callback,
compression_options=compression_options,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
IOLoop.current().add_future(conn.connect_future, callback)
return conn.connect_future
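Client-side usage after the ``io_loop`` removal (URL and port are illustrative):

    from tornado import gen, ioloop
    from tornado.websocket import websocket_connect

    @gen.coroutine
    def run():
        conn = yield websocket_connect('ws://localhost:8888/ws',
                                       ping_interval=10, ping_timeout=30)
        yield conn.write_message('hello')
        msg = yield conn.read_message()   # None once the connection closes
        print(msg)

    ioloop.IOLoop.current().run_sync(run)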
View file
@ -1,4 +1,3 @@
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
View file
@ -13,12 +13,12 @@ from tornado.ioloop import IOLoop
class WebServer(threading.Thread):
def __init__(self, options={}, io_loop=None):
def __init__(self, options={}, **kwargs):
threading.Thread.__init__(self)
self.daemon = True
self.alive = True
self.name = 'TORNADO'
self.io_loop = io_loop or IOLoop.current()
self.io_loop = None
self.server = None
self.options = options
@ -55,7 +55,7 @@ class WebServer(threading.Thread):
# Load the app
self.app = Application([],
debug=True,
debug=False,
autoreload=False,
gzip=True,
cookie_secret=sickbeard.COOKIE_SECRET,
@ -144,6 +144,8 @@ class WebServer(threading.Thread):
logger.ERROR)
return
self.io_loop = IOLoop.current()
try:
self.io_loop.start()
self.io_loop.close(True)
@ -153,4 +155,5 @@ class WebServer(threading.Thread):
def shutDown(self):
self.alive = False
self.io_loop.stop()
if None is not self.io_loop:
self.io_loop.stop()
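A hedged sketch of the pattern this change adopts: under Tornado 5 a background thread must create its loop on the thread that runs it, and other threads should stop it via ``add_callback`` (class name invented):

    import threading

    from tornado.ioloop import IOLoop

    class LoopThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.io_loop = None

        def run(self):
            self.io_loop = IOLoop()     # the loop lives on this thread
            self.io_loop.make_current()
            self.io_loop.start()

        def shut_down(self):
            if self.io_loop is not None:
                # add_callback is thread-safe; calling stop() directly
                # from another thread is not guaranteed to be.
                self.io_loop.add_callback(self.io_loop.stop)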