Merge branch 'refs/heads/develop'
@@ -69,7 +69,7 @@ class Core(Plugin):
 
     def available(self):
         return jsonified({
-            'succes': True
+            'success': True
         })
 
     def shutdown(self):
@@ -101,7 +101,7 @@ class Core(Plugin):
 
         self.shutdown_started = True
 
-        fireEvent('app.shutdown')
+        fireEvent('app.do_shutdown')
         log.debug('Every plugin got shutdown event')
 
         loop = True
@@ -177,6 +177,6 @@ class Core(Plugin):
     def signalHandler(self):
 
         def signal_handler(signal, frame):
-            fireEvent('app.shutdown')
+            fireEvent('app.do_shutdown')
 
         signal.signal(signal.SIGINT, signal_handler)
@@ -31,9 +31,14 @@ class Transmission(Downloader):
 
         # Set parameters for Transmission
         folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1]
+        folder_path = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep)
+
+        # Create the empty folder to download too
+        self.makeDir(folder_path)
 
         params = {
             'paused': self.conf('paused', default = 0),
-            'download-dir': os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep)
+            'download-dir': folder_path
         }
 
         torrent_params = {
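For context: the dictionary built here becomes the arguments of Transmission's RPC `torrent-add` call, so computing `folder_path` once and creating it with `makeDir` up front lets Transmission write straight into the final folder. A minimal sketch of how such arguments travel over the RPC (endpoint and session handling simplified; the helper below is illustrative, not CouchPotato's actual client code):

```python
import json
import urllib2

def add_torrent(rpc_url, torrent_b64, download_dir, paused=False):
    # Transmission's RPC takes a JSON body with a 'torrent-add' method.
    payload = json.dumps({
        'method': 'torrent-add',
        'arguments': {
            'metainfo': torrent_b64,       # base64-encoded .torrent data
            'download-dir': download_dir,  # the pre-created folder_path
            'paused': paused,
        },
    })
    req = urllib2.Request(rpc_url, payload, {'Content-Type': 'application/json'})
    # A real client must also handle Transmission's 409 response and resend
    # with the X-Transmission-Session-Id header it returns.
    return json.loads(urllib2.urlopen(req).read())
```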
@@ -35,7 +35,7 @@ class Plugin(object):
     http_failed_disabled = {}
 
     def registerPlugin(self):
-        addEvent('app.shutdown', self.doShutdown)
+        addEvent('app.do_shutdown', self.doShutdown)
         addEvent('plugin.running', self.isRunning)
 
     def conf(self, attr, value = None, default = None):
@@ -114,8 +114,11 @@ class Plugin(object):
         # Don't try for failed requests
         if self.http_failed_disabled.get(host, 0) > 0:
             if self.http_failed_disabled[host] > (time.time() - 900):
-                log.info('Disabled calls to %s for 15 minutes because so many failed requests.', host)
-                raise Exception
+                log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
+                if not show_error:
+                    raise
+                else:
+                    return ''
             else:
                 del self.http_failed_request[host]
                 del self.http_failed_disabled[host]
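The reworked guard backs off a host for 15 minutes after repeated failures, and now honors `show_error`: it re-raises for callers that want the exception and returns an empty body otherwise. The timing core of the backoff, sketched in isolation (simplified; the failure counter itself is maintained elsewhere in `urlopen`):

```python
import time

FAILURE_WINDOW = 900  # seconds: leave a failing host alone for 15 minutes

def host_disabled(http_failed_disabled, host):
    disabled_at = http_failed_disabled.get(host, 0)
    # Flagged, and the flag is younger than 15 minutes -> still disabled.
    return disabled_at > 0 and disabled_at > time.time() - FAILURE_WINDOW
```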
@@ -300,10 +300,11 @@ var MovieList = new Class({
 	},
 
 	deleteSelected: function(){
-		var self = this;
-		var ids = self.getSelectedMovies()
+		var self = this,
+			ids = self.getSelectedMovies(),
+			help_msg = self.identifier == 'wanted' ? 'If you do, you won\'t be able to watch them, as they won\'t get downloaded!' : 'Your files will be safe, this will only delete the reference from the CouchPotato manage list';
 
-		var qObj = new Question('Are you sure you want to delete '+ids.length+' movie'+ (ids.length != 1 ? 's' : '') +'?', 'If you do, you won\'t be able to watch them, as they won\'t get downloaded!', [{
+		var qObj = new Question('Are you sure you want to delete '+ids.length+' movie'+ (ids.length != 1 ? 's' : '') +'?', help_msg, [{
 			'text': 'Yes, delete '+(ids.length != 1 ? 'them' : 'it'),
 			'class': 'delete',
 			'events': {
@@ -22,7 +22,7 @@ class QualityPlugin(Plugin):
         {'identifier': '720p', 'hd': True, 'size': (3500, 10000), 'label': '720P', 'width': 1280, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']},
         {'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p'], 'ext':['avi']},
         {'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': [], 'allow': [], 'ext':['iso', 'img'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts']},
-        {'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': ['dvdrip'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
+        {'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': ['dvdrip'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
         {'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg']},
         {'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': [], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
         {'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
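The new `'tags'` entry mixes plain strings with tuples; a tuple such as `('dvd', 'rip')` plausibly means all of its words must appear in the release name for the tag to match, which is how a name like `Movie.DVD.XviD` can still be recognized as a DVD-Rip. A hedged sketch of that rule (the real matching lives in the `quality.guess` handler, which is not part of this diff):

```python
import re

def tag_matches(tag, release_name):
    # 'Some.Movie.2010.DVD.XviD-GRP' -> ['some', 'movie', '2010', 'dvd', 'xvid', 'grp']
    words = re.split('\W+', release_name.lower())
    if isinstance(tag, tuple):
        # Tuple tags require every word to be present ('dvd' AND 'xvid').
        return all(word in words for word in tag)
    return tag in words

assert tag_matches(('dvd', 'xvid'), 'Some.Movie.2010.DVD.XviD-GRP')
assert not tag_matches(('dvd', 'rip'), 'Some.Movie.2010.BRRip.XviD-GRP')
```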
@@ -24,7 +24,8 @@ config = [{
                 'name': 'required_words',
                 'label': 'Required words',
                 'default': '',
-                'description': 'Ignore releases that don\'t contain at least one of these words.'
+                'placeholder': 'Example: DTS, AC3 & English',
+                'description': 'Ignore releases that don\'t contain at least one set of words. Sets are separated by "," and each word within a set must be separated with "&"'
             },
             {
                 'name': 'ignored_words',
@@ -3,7 +3,7 @@ from couchpotato.api import addApiView
 from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
 from couchpotato.core.helpers.encoding import simplifyString, toUnicode
 from couchpotato.core.helpers.request import jsonified, getParam
-from couchpotato.core.helpers.variable import md5, getTitle
+from couchpotato.core.helpers.variable import md5, getTitle, splitString
 from couchpotato.core.logger import CPLog
 from couchpotato.core.plugins.base import Plugin
 from couchpotato.core.settings.model import Movie, Release, ReleaseInfo
@@ -204,6 +204,10 @@ class Searcher(Plugin):
 
 
         for nzb in sorted_results:
+            if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and nzb.get('age') <= quality_type.get('wait_for', 0):
+                log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), nzb['name']))
+                continue
+
             if nzb['status_id'] == ignored_status.get('id'):
                 log.info('Ignored: %s', nzb['name'])
                 continue
@@ -301,13 +305,18 @@ class Searcher(Plugin):
         movie_words = re.split('\W+', simplifyString(movie_name))
         nzb_name = simplifyString(nzb['name'])
         nzb_words = re.split('\W+', nzb_name)
-        required_words = [x.strip().lower() for x in self.conf('required_words').lower().split(',')]
+        required_words = splitString(self.conf('required_words').lower())
 
-        if self.conf('required_words') and not list(set(nzb_words) & set(required_words)):
+        req_match = 0
+        for req_set in required_words:
+            req = splitString(req_set, '&')
+            req_match += len(list(set(nzb_words) & set(req))) == len(req)
+
+        if self.conf('required_words') and req_match == 0:
             log.info2("Wrong: Required word missing: %s" % nzb['name'])
             return False
 
-        ignored_words = [x.strip().lower() for x in self.conf('ignored_words').split(',')]
+        ignored_words = splitString(self.conf('ignored_words').lower())
         blacklisted = list(set(nzb_words) & set(ignored_words))
         if self.conf('ignored_words') and blacklisted:
             log.info2("Wrong: '%s' blacklisted words: %s" % (nzb['name'], ", ".join(blacklisted)))
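The rewritten check treats `required_words` as comma-separated sets whose members are joined with '&': a release passes when every word of at least one set is present, matching the new setting description above. The same rule as a standalone sketch (plain `split` stands in for CouchPotato's `splitString` helper):

```python
import re

def passes_required_words(setting, release_name):
    # 'DTS, AC3 & English' -> [['dts'], ['ac3', 'english']]
    word_sets = [[w.strip() for w in part.split('&')]
                 for part in setting.lower().split(',') if part.strip()]
    if not word_sets:
        return True  # nothing is required
    words = set(re.split('\W+', release_name.lower()))
    # At least one set must be fully contained in the release's words.
    return any(set(req) <= words for req in word_sets)

assert passes_required_words('DTS, AC3 & English', 'Movie.2010.English.AC3.XviD')
assert not passes_required_words('DTS, AC3 & English', 'Movie.2010.AC3.XviD')
```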
@@ -389,6 +398,11 @@ class Searcher(Plugin):
         if list(set(nzb_words) & set(quality['alternative'])):
             found[quality['identifier']] = True
 
+        # Try guessing via quality tags
+        guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
+        if guess:
+            found[guess['identifier']] = True
+
         # Hack for older movies that don't contain quality tag
         year_name = fireEvent('scanner.name_year', name, single = True)
         if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
@@ -29,8 +29,6 @@ class NZBClub(NZBProvider, RSS):
             return results
 
         q = '"%s %s" %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'], quality.get('identifier'))
-        for ignored in Env.setting('ignored_words', 'searcher').split(','):
-            q = '%s -%s' % (q, ignored.strip())
 
         params = {
             'q': q,
@@ -1,39 +0,0 @@
-from .main import NZBMatrix
-
-def start():
-    return NZBMatrix()
-
-config = [{
-    'name': 'nzbmatrix',
-    'groups': [
-        {
-            'tab': 'searcher',
-            'subtab': 'nzb_providers',
-            'name': 'nzbmatrix',
-            'label': 'NZBMatrix',
-            'description': 'See <a href="https://nzbmatrix.com/">NZBMatrix</a>',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'type': 'enabler',
-                },
-                {
-                    'name': 'username',
-                },
-                {
-                    'name': 'api_key',
-                    'default': '',
-                    'label': 'Api Key',
-                },
-                {
-                    'name': 'english_only',
-                    'default': 1,
-                    'type': 'bool',
-                    'label': 'English only',
-                    'description': 'Only search for English spoken movies on NZBMatrix',
-                },
-            ],
-        },
-    ],
-}]
@@ -1,105 +0,0 @@
-from couchpotato.core.event import fireEvent
-from couchpotato.core.helpers.encoding import tryUrlencode
-from couchpotato.core.helpers.rss import RSS
-from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.nzb.base import NZBProvider
-from couchpotato.environment import Env
-from dateutil.parser import parse
-import time
-import xml.etree.ElementTree as XMLTree
-
-log = CPLog(__name__)
-
-
-class NZBMatrix(NZBProvider, RSS):
-
-    urls = {
-        'download': 'https://api.nzbmatrix.com/v1.1/download.php?id=%s',
-        'detail': 'https://nzbmatrix.com/nzb-details.php?id=%s&hit=1',
-        'search': 'https://rss.nzbmatrix.com/rss.php',
-    }
-
-    cat_ids = [
-        ([50], ['bd50']),
-        ([42, 53], ['720p', '1080p']),
-        ([2, 9], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
-        ([54], ['brrip']),
-        ([1], ['dvdr']),
-    ]
-    cat_backup_id = 2
-
-    def search(self, movie, quality):
-
-        results = []
-
-        if self.isDisabled():
-            return results
-
-        cat_ids = ','.join(['%s' % x for x in self.getCatId(quality.get('identifier'))])
-
-        arguments = tryUrlencode({
-            'term': movie['library']['identifier'],
-            'subcat': cat_ids,
-            'username': self.conf('username'),
-            'apikey': self.conf('api_key'),
-            'searchin': 'weblink',
-            'maxage': Env.setting('retention', section = 'nzb'),
-            'english': self.conf('english_only'),
-        })
-        url = "%s?%s" % (self.urls['search'], arguments)
-
-        cache_key = 'nzbmatrix.%s.%s' % (movie['library'].get('identifier'), cat_ids)
-
-        data = self.getCache(cache_key, url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
-        if data:
-            try:
-                try:
-                    data = XMLTree.fromstring(data)
-                    nzbs = self.getElements(data, 'channel/item')
-                except Exception, e:
-                    log.debug('%s, %s', (self.getName(), e))
-                    return results
-
-                for nzb in nzbs:
-
-                    title = self.getTextElement(nzb, "title")
-                    if 'error' in title.lower(): continue
-
-                    id = int(self.getTextElement(nzb, "link").split('&')[0].partition('id=')[2])
-                    size = self.getTextElement(nzb, "description").split('<br /><b>')[2].split('> ')[1]
-                    date = str(self.getTextElement(nzb, "description").split('<br /><b>')[3].partition('Added:</b> ')[2])
-
-                    new = {
-                        'id': id,
-                        'type': 'nzb',
-                        'provider': self.getName(),
-                        'name': title,
-                        'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
-                        'size': self.parseSize(size),
-                        'url': self.urls['download'] % id + self.getApiExt(),
-                        'download': self.download,
-                        'detail_url': self.urls['detail'] % id,
-                        'description': self.getTextElement(nzb, "description"),
-                        'check_nzb': True,
-                    }
-
-                    is_correct_movie = fireEvent('searcher.correct_movie',
-                                                 nzb = new, movie = movie, quality = quality,
-                                                 imdb_results = True, single = True)
-
-                    if is_correct_movie:
-                        new['score'] = fireEvent('score.calculate', new, movie, single = True)
-                        results.append(new)
-                        self.found(new)
-
-                return results
-            except SyntaxError:
-                log.error('Failed to parse XML response from NZBMatrix.com')
-
-        return results
-
-    def getApiExt(self):
-        return '&username=%s&apikey=%s' % (self.conf('username'), self.conf('api_key'))
-
-    def isEnabled(self):
-        return NZBProvider.isEnabled(self) and self.conf('username') and self.conf('api_key')
@@ -31,6 +31,10 @@ config = [{
                 'name': 'password',
                 'default': '',
                 'type': 'password',
             },
+            {
+                'name': 'passkey',
+                'default': '',
+            }
         ],
     }
@@ -21,7 +21,7 @@ class PassThePopcorn(TorrentProvider):
         'domain': 'https://tls.passthepopcorn.me',
         'detail': 'https://tls.passthepopcorn.me/torrents.php?torrentid=%s',
         'torrent': 'https://tls.passthepopcorn.me/torrents.php',
-        'login': 'https://tls.passthepopcorn.me/login.php',
+        'login': 'https://tls.passthepopcorn.me/ajax.php?action=login',
         'search': 'https://tls.passthepopcorn.me/search/%s/0/7/%d'
     }
 
@@ -249,6 +249,7 @@ class PassThePopcorn(TorrentProvider):
         return tryUrlencode({
             'username': self.conf('username'),
             'password': self.conf('password'),
+            'passkey': self.conf('passkey'),
             'keeplogged': '1',
             'login': 'Login'
         })
@@ -4,10 +4,8 @@ from couchpotato.api import api, NonBlockHandler
 from couchpotato.core.event import fireEventAsync, fireEvent
 from couchpotato.core.helpers.variable import getDataDir, tryInt
 from logging import handlers
-from tornado import autoreload
-from tornado.httpserver import HTTPServer
 from tornado.ioloop import IOLoop
-from tornado.web import RequestHandler, Application, FallbackHandler
+from tornado.web import Application, FallbackHandler
 from tornado.wsgi import WSGIContainer
 from werkzeug.contrib.cache import FileSystemCache
 import locale
@@ -57,10 +55,8 @@ def _log(status_code, request):
 
     if status_code < 400:
         return
-    elif status_code < 500:
-        log_method = logging.warning
-    else:
-        log_method = logging.error
+    log_method = logging.debug
     request_time = 1000.0 * request.request_time()
     summary = request.method + " " + request.uri + " (" + \
         request.remote_ip + ")"
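Collapsing the warning/error branches into `log_method = logging.debug` demotes every Tornado access-log line, regardless of status code. For comparison, a sketch of the same effect done through Tornado's `log_function` application setting instead of editing `_log` (the wiring shown in the comment is an assumption, not taken from this diff):

```python
import logging

def quiet_log_request(handler):
    # Called once per finished request; route everything to DEBUG.
    request_time = 1000.0 * handler.request.request_time()
    logging.debug("%d %s %.2fms", handler.get_status(),
                  handler._request_summary(), request_time)

# application = Application(handlers, log_function=quiet_log_request)
```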
@@ -436,9 +436,14 @@ Option.String = new Class({
 		self.input = new Element('input.inlay', {
 			'type': 'text',
 			'name': self.postName(),
-			'value': self.getSettingValue()
+			'value': self.getSettingValue(),
+			'placeholder': self.getPlaceholder()
 		})
 	);
 	},
 
+	getPlaceholder: function(){
+		return this.options.placeholder
+	}
 
 });
@@ -25,5 +25,5 @@ from __future__ import absolute_import, division, with_statement
 # is zero for an official release, positive for a development branch,
 # or negative for a release candidate (after the base version number
 # has been incremented)
-version = "2.3.post1"
-version_info = (2, 3, 0, 1)
+version = "2.4.post2"
+version_info = (2, 4, 0, 2)
@@ -50,7 +50,6 @@ import base64
 import binascii
 import hashlib
 import hmac
-import logging
 import time
 import urllib
 import urlparse
@@ -59,6 +58,7 @@ import uuid
 from tornado import httpclient
 from tornado import escape
 from tornado.httputil import url_concat
+from tornado.log import gen_log
 from tornado.util import bytes_type, b
 
 
@@ -95,7 +95,7 @@ class OpenIdMixin(object):
         args["openid.mode"] = u"check_authentication"
         url = self._OPENID_ENDPOINT
         if http_client is None:
-            http_client = httpclient.AsyncHTTPClient()
+            http_client = self.get_auth_http_client()
         http_client.fetch(url, self.async_callback(
             self._on_authentication_verified, callback),
             method="POST", body=urllib.urlencode(args))
@@ -150,7 +150,7 @@ class OpenIdMixin(object):
 
     def _on_authentication_verified(self, callback, response):
         if response.error or b("is_valid:true") not in response.body:
-            logging.warning("Invalid OpenID response: %s", response.error or
+            gen_log.warning("Invalid OpenID response: %s", response.error or
                             response.body)
             callback(None)
             return
@@ -203,8 +203,19 @@ class OpenIdMixin(object):
             user["locale"] = locale
         if username:
             user["username"] = username
+        claimed_id = self.get_argument("openid.claimed_id", None)
+        if claimed_id:
+            user["claimed_id"] = claimed_id
         callback(user)
 
+    def get_auth_http_client(self):
+        """Returns the AsyncHTTPClient instance to be used for auth requests.
+
+        May be overridden by subclasses to use an http client other than
+        the default.
+        """
+        return httpclient.AsyncHTTPClient()
+
 
 class OAuthMixin(object):
     """Abstract implementation of OAuth.
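This `get_auth_http_client()` hook, repeated across the mixins below, replaces the hard-coded `httpclient.AsyncHTTPClient()` construction so a subclass can decide which client performs its auth fetches. A minimal sketch of an override (the handler class is illustrative):

```python
from tornado import web
from tornado.auth import GoogleMixin
from tornado.curl_httpclient import CurlAsyncHTTPClient

class LoginHandler(web.RequestHandler, GoogleMixin):
    def get_auth_http_client(self):
        # Send every OpenID/OAuth request through the curl-based client
        # instead of the configured default.
        return CurlAsyncHTTPClient()
```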
@@ -229,7 +240,7 @@ class OAuthMixin(object):
         if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
             raise Exception("This service does not support oauth_callback")
         if http_client is None:
-            http_client = httpclient.AsyncHTTPClient()
+            http_client = self.get_auth_http_client()
         if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
             http_client.fetch(
                 self._oauth_request_token_url(callback_uri=callback_uri,
@@ -260,21 +271,21 @@ class OAuthMixin(object):
         oauth_verifier = self.get_argument("oauth_verifier", None)
         request_cookie = self.get_cookie("_oauth_request_token")
         if not request_cookie:
-            logging.warning("Missing OAuth request token cookie")
+            gen_log.warning("Missing OAuth request token cookie")
             callback(None)
             return
         self.clear_cookie("_oauth_request_token")
         cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
         if cookie_key != request_key:
-            logging.info((cookie_key, request_key, request_cookie))
-            logging.warning("Request token does not match cookie")
+            gen_log.info((cookie_key, request_key, request_cookie))
+            gen_log.warning("Request token does not match cookie")
             callback(None)
             return
         token = dict(key=cookie_key, secret=cookie_secret)
         if oauth_verifier:
             token["verifier"] = oauth_verifier
         if http_client is None:
-            http_client = httpclient.AsyncHTTPClient()
+            http_client = self.get_auth_http_client()
         http_client.fetch(self._oauth_access_token_url(token),
                           self.async_callback(self._on_access_token, callback))
 
@@ -282,14 +293,16 @@ class OAuthMixin(object):
         consumer_token = self._oauth_consumer_token()
         url = self._OAUTH_REQUEST_TOKEN_URL
         args = dict(
-            oauth_consumer_key=consumer_token["key"],
+            oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
             oauth_signature_method="HMAC-SHA1",
             oauth_timestamp=str(int(time.time())),
-            oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
+            oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
             oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
         )
         if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
-            if callback_uri:
+            if callback_uri == "oob":
+                args["oauth_callback"] = "oob"
+            elif callback_uri:
                 args["oauth_callback"] = urlparse.urljoin(
                     self.request.full_url(), callback_uri)
         if extra_params:
@@ -309,7 +322,10 @@ class OAuthMixin(object):
                 base64.b64encode(request_token["secret"]))
         self.set_cookie("_oauth_request_token", data)
         args = dict(oauth_token=request_token["key"])
-        if callback_uri:
+        if callback_uri == "oob":
+            self.finish(authorize_url + "?" + urllib.urlencode(args))
+            return
+        elif callback_uri:
             args["oauth_callback"] = urlparse.urljoin(
                 self.request.full_url(), callback_uri)
         self.redirect(authorize_url + "?" + urllib.urlencode(args))
@@ -318,11 +334,11 @@ class OAuthMixin(object):
         consumer_token = self._oauth_consumer_token()
         url = self._OAUTH_ACCESS_TOKEN_URL
         args = dict(
-            oauth_consumer_key=consumer_token["key"],
-            oauth_token=request_token["key"],
+            oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
+            oauth_token=escape.to_basestring(request_token["key"]),
             oauth_signature_method="HMAC-SHA1",
             oauth_timestamp=str(int(time.time())),
-            oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
+            oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
             oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
         )
         if "verifier" in request_token:
@@ -340,7 +356,7 @@ class OAuthMixin(object):
 
     def _on_access_token(self, callback, response):
         if response.error:
-            logging.warning("Could not fetch access token")
+            gen_log.warning("Could not fetch access token")
             callback(None)
             return
 
@@ -367,11 +383,11 @@ class OAuthMixin(object):
         """
         consumer_token = self._oauth_consumer_token()
         base_args = dict(
-            oauth_consumer_key=consumer_token["key"],
-            oauth_token=access_token["key"],
+            oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
+            oauth_token=escape.to_basestring(access_token["key"]),
             oauth_signature_method="HMAC-SHA1",
             oauth_timestamp=str(int(time.time())),
-            oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
+            oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
             oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
         )
         args = {}
@@ -386,6 +402,14 @@ class OAuthMixin(object):
         base_args["oauth_signature"] = signature
         return base_args
 
+    def get_auth_http_client(self):
+        """Returns the AsyncHTTPClient instance to be used for auth requests.
+
+        May be overridden by subclasses to use an http client other than
+        the default.
+        """
+        return httpclient.AsyncHTTPClient()
+
 
 class OAuth2Mixin(object):
     """Abstract implementation of OAuth v 2."""
@@ -463,6 +487,7 @@ class TwitterMixin(OAuthMixin):
     _OAUTH_AUTHORIZE_URL = "http://api.twitter.com/oauth/authorize"
     _OAUTH_AUTHENTICATE_URL = "http://api.twitter.com/oauth/authenticate"
     _OAUTH_NO_CALLBACKS = False
+    _TWITTER_BASE_URL = "http://api.twitter.com/1"
 
     def authenticate_redirect(self, callback_uri=None):
         """Just like authorize_redirect(), but auto-redirects if authorized.
@@ -470,7 +495,7 @@ class TwitterMixin(OAuthMixin):
         This is generally the right interface to use if you are using
         Twitter for single-sign on.
         """
-        http = httpclient.AsyncHTTPClient()
+        http = self.get_auth_http_client()
         http.fetch(self._oauth_request_token_url(callback_uri=callback_uri), self.async_callback(
             self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None))
 
@@ -517,7 +542,7 @@ class TwitterMixin(OAuthMixin):
             # usual pattern: http://search.twitter.com/search.json
             url = path
         else:
-            url = "http://api.twitter.com/1" + path + ".json"
+            url = self._TWITTER_BASE_URL + path + ".json"
         # Add the OAuth resource request signature if we have credentials
         if access_token:
             all_args = {}
@@ -530,7 +555,7 @@ class TwitterMixin(OAuthMixin):
         if args:
             url += "?" + urllib.urlencode(args)
         callback = self.async_callback(self._on_twitter_request, callback)
-        http = httpclient.AsyncHTTPClient()
+        http = self.get_auth_http_client()
         if post_args is not None:
             http.fetch(url, method="POST", body=urllib.urlencode(post_args),
                        callback=callback)
@@ -539,7 +564,7 @@ class TwitterMixin(OAuthMixin):
 
     def _on_twitter_request(self, callback, response):
         if response.error:
-            logging.warning("Error response %s fetching %s", response.error,
+            gen_log.warning("Error response %s fetching %s", response.error,
                             response.request.url)
             callback(None)
             return
@@ -555,7 +580,7 @@ class TwitterMixin(OAuthMixin):
     def _oauth_get_user(self, access_token, callback):
         callback = self.async_callback(self._parse_user_response, callback)
         self.twitter_request(
-            "/users/show/" + access_token["screen_name"],
+            "/users/show/" + escape.native_str(access_token[b("screen_name")]),
             access_token=access_token, callback=callback)
 
     def _parse_user_response(self, callback, user):
@@ -652,7 +677,7 @@ class FriendFeedMixin(OAuthMixin):
         if args:
             url += "?" + urllib.urlencode(args)
         callback = self.async_callback(self._on_friendfeed_request, callback)
-        http = httpclient.AsyncHTTPClient()
+        http = self.get_auth_http_client()
         if post_args is not None:
             http.fetch(url, method="POST", body=urllib.urlencode(post_args),
                        callback=callback)
@@ -661,7 +686,7 @@ class FriendFeedMixin(OAuthMixin):
 
     def _on_friendfeed_request(self, callback, response):
         if response.error:
-            logging.warning("Error response %s fetching %s", response.error,
+            gen_log.warning("Error response %s fetching %s", response.error,
                             response.request.url)
             callback(None)
             return
@@ -743,7 +768,7 @@ class GoogleMixin(OpenIdMixin, OAuthMixin):
                 break
         token = self.get_argument("openid." + oauth_ns + ".request_token", "")
         if token:
-            http = httpclient.AsyncHTTPClient()
+            http = self.get_auth_http_client()
             token = dict(key=token, secret="")
             http.fetch(self._oauth_access_token_url(token),
                        self.async_callback(self._on_access_token, callback))
@@ -854,7 +879,7 @@ class FacebookMixin(object):
             self._on_get_user_info, callback, session),
             session_key=session["session_key"],
             uids=session["uid"],
-            fields="uid,first_name,last_name,name,locale,pic_square," \
+            fields="uid,first_name,last_name,name,locale,pic_square,"
                    "profile_url,username")
 
     def facebook_request(self, method, callback, **args):
@@ -899,7 +924,7 @@ class FacebookMixin(object):
         args["sig"] = self._signature(args)
         url = "http://api.facebook.com/restserver.php?" + \
             urllib.urlencode(args)
-        http = httpclient.AsyncHTTPClient()
+        http = self.get_auth_http_client()
         http.fetch(url, callback=self.async_callback(
             self._parse_response, callback))
 
@@ -922,17 +947,17 @@ class FacebookMixin(object):
 
     def _parse_response(self, callback, response):
         if response.error:
-            logging.warning("HTTP error from Facebook: %s", response.error)
+            gen_log.warning("HTTP error from Facebook: %s", response.error)
             callback(None)
             return
         try:
             json = escape.json_decode(response.body)
         except Exception:
-            logging.warning("Invalid JSON from Facebook: %r", response.body)
+            gen_log.warning("Invalid JSON from Facebook: %r", response.body)
             callback(None)
             return
         if isinstance(json, dict) and json.get("error_code"):
-            logging.warning("Facebook error: %d: %r", json["error_code"],
+            gen_log.warning("Facebook error: %d: %r", json["error_code"],
                             json.get("error_msg"))
             callback(None)
             return
@@ -945,6 +970,14 @@ class FacebookMixin(object):
             body = body.encode("utf-8")
         return hashlib.md5(body).hexdigest()
 
+    def get_auth_http_client(self):
+        """Returns the AsyncHTTPClient instance to be used for auth requests.
+
+        May be overridden by subclasses to use an http client other than
+        the default.
+        """
+        return httpclient.AsyncHTTPClient()
+
 
 class FacebookGraphMixin(OAuth2Mixin):
     """Facebook authentication using the new Graph API and OAuth2."""
@@ -979,7 +1012,7 @@ class FacebookGraphMixin(OAuth2Mixin):
                 self.finish()
 
         """
-        http = httpclient.AsyncHTTPClient()
+        http = self.get_auth_http_client()
         args = {
             "redirect_uri": redirect_uri,
             "code": code,
@@ -999,7 +1032,7 @@ class FacebookGraphMixin(OAuth2Mixin):
     def _on_access_token(self, redirect_uri, client_id, client_secret,
                          callback, fields, response):
         if response.error:
-            logging.warning('Facebook auth error: %s' % str(response))
+            gen_log.warning('Facebook auth error: %s' % str(response))
             callback(None)
             return
 
@@ -1073,7 +1106,7 @@ class FacebookGraphMixin(OAuth2Mixin):
         if all_args:
             url += "?" + urllib.urlencode(all_args)
         callback = self.async_callback(self._on_facebook_request, callback)
-        http = httpclient.AsyncHTTPClient()
+        http = self.get_auth_http_client()
         if post_args is not None:
             http.fetch(url, method="POST", body=urllib.urlencode(post_args),
                        callback=callback)
@@ -1082,12 +1115,20 @@ class FacebookGraphMixin(OAuth2Mixin):
 
     def _on_facebook_request(self, callback, response):
         if response.error:
-            logging.warning("Error response %s fetching %s", response.error,
+            gen_log.warning("Error response %s fetching %s", response.error,
                             response.request.url)
             callback(None)
             return
         callback(escape.json_decode(response.body))
 
+    def get_auth_http_client(self):
+        """Returns the AsyncHTTPClient instance to be used for auth requests.
+
+        May be overridden by subclasses to use an http client other than
+        the default.
+        """
+        return httpclient.AsyncHTTPClient()
+
 
 def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
     """Calculates the HMAC-SHA1 OAuth signature for the given request.
 
@@ -71,10 +71,13 @@ import logging
 import os
 import pkgutil
 import sys
+import traceback
 import types
 import subprocess
+import weakref
 
 from tornado import ioloop
+from tornado.log import gen_log
 from tornado import process
 
 try:
@@ -83,6 +86,11 @@ except ImportError:
     signal = None
 
 
+_watched_files = set()
+_reload_hooks = []
+_reload_attempted = False
+_io_loops = weakref.WeakKeyDictionary()
+
 
 def start(io_loop=None, check_time=500):
     """Restarts the process automatically when a module is modified.
 
@@ -90,6 +98,11 @@ def start(io_loop=None, check_time=500):
     so will terminate any pending requests.
     """
     io_loop = io_loop or ioloop.IOLoop.instance()
+    if io_loop in _io_loops:
+        return
+    _io_loops[io_loop] = True
+    if len(_io_loops) > 1:
+        gen_log.warning("tornado.autoreload started more than once in the same process")
     add_reload_hook(functools.partial(_close_all_fds, io_loop))
     modify_times = {}
     callback = functools.partial(_reload_on_update, modify_times)
@@ -108,8 +121,6 @@ def wait():
     start(io_loop)
     io_loop.start()
 
-_watched_files = set()
-
 
 def watch(filename):
     """Add a file to the watch list.
@@ -118,8 +129,6 @@ def watch(filename):
     """
     _watched_files.add(filename)
 
-_reload_hooks = []
-
 
 def add_reload_hook(fn):
     """Add a function to be called before reloading the process.
@@ -139,8 +148,6 @@ def _close_all_fds(io_loop):
         except Exception:
             pass
 
-_reload_attempted = False
-
 
 def _reload_on_update(modify_times):
     if _reload_attempted:
@@ -177,7 +184,7 @@ def _check_file(modify_times, path):
         modify_times[path] = modified
         return
     if modify_times[path] != modified:
-        logging.info("%s modified; restarting server", path)
+        gen_log.info("%s modified; restarting server", path)
         _reload()
 
 
@@ -272,13 +279,25 @@ def main():
             # module) will see the right things.
             exec f.read() in globals(), globals()
     except SystemExit, e:
-        logging.info("Script exited with status %s", e.code)
+        logging.basicConfig()
+        gen_log.info("Script exited with status %s", e.code)
     except Exception, e:
-        logging.warning("Script exited with uncaught exception", exc_info=True)
+        logging.basicConfig()
+        gen_log.warning("Script exited with uncaught exception", exc_info=True)
+        # If an exception occurred at import time, the file with the error
+        # never made it into sys.modules and so we won't know to watch it.
+        # Just to make sure we've covered everything, walk the stack trace
+        # from the exception and watch every file.
+        for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
+            watch(filename)
         if isinstance(e, SyntaxError):
             # SyntaxErrors are special: their innermost stack frame is fake
             # so extract_tb won't see it and we have to get the filename
            # from the exception object.
             watch(e.filename)
     else:
-        logging.info("Script exited normally")
+        logging.basicConfig()
+        gen_log.info("Script exited normally")
     # restore sys.argv so subsequent executions will include autoreload
     sys.argv = original_argv
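With the module-level state (`_watched_files`, `_reload_hooks`, `_reload_attempted`, `_io_loops`) declared once at the top, `start()` is now idempotent per IOLoop and warns when autoreload is started twice in one process. A minimal usage sketch:

```python
from tornado import autoreload, ioloop

io_loop = ioloop.IOLoop.instance()
autoreload.start(io_loop)          # safe to call again; the guard returns early
autoreload.watch('settings.conf')  # also restart when a non-module file changes
autoreload.add_reload_hook(lambda: open('/tmp/reloaded', 'w').close())
io_loop.start()
```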
127 libs/tornado/concurrent.py Executable file
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from __future__ import absolute_import, division, with_statement
+
+import functools
+import sys
+
+from tornado.stack_context import ExceptionStackContext
+from tornado.util import raise_exc_info
+
+try:
+    from concurrent import futures
+except ImportError:
+    futures = None
+
+
+class DummyFuture(object):
+    def __init__(self):
+        self._done = False
+        self._result = None
+        self._exception = None
+        self._callbacks = []
+
+    def cancel(self):
+        return False
+
+    def cancelled(self):
+        return False
+
+    def running(self):
+        return not self._done
+
+    def done(self):
+        return self._done
+
+    def result(self, timeout=None):
+        self._check_done()
+        if self._exception:
+            raise self._exception
+        return self._result
+
+    def exception(self, timeout=None):
+        self._check_done()
+        if self._exception:
+            return self._exception
+        else:
+            return None
+
+    def add_done_callback(self, fn):
+        if self._done:
+            fn(self)
+        else:
+            self._callbacks.append(fn)
+
+    def set_result(self, result):
+        self._result = result
+        self._set_done()
+
+    def set_exception(self, exception):
+        self._exception = exception
+        self._set_done()
+
+    def _check_done(self):
+        if not self._done:
+            raise Exception("DummyFuture does not support blocking for results")
+
+    def _set_done(self):
+        self._done = True
+        for cb in self._callbacks:
+            # TODO: error handling
+            cb(self)
+        self._callbacks = None
+
+if futures is None:
+    Future = DummyFuture
+else:
+    Future = futures.Future
+
+
+class DummyExecutor(object):
+    def submit(self, fn, *args, **kwargs):
+        future = Future()
+        try:
+            future.set_result(fn(*args, **kwargs))
+        except Exception, e:
+            future.set_exception(e)
+        return future
+
+dummy_executor = DummyExecutor()
+
+
+def run_on_executor(fn):
+    @functools.wraps(fn)
+    def wrapper(self, *args, **kwargs):
+        callback = kwargs.pop("callback")
+        future = self.executor.submit(fn, self, *args, **kwargs)
+        if callback:
+            self.io_loop.add_future(future, callback)
+        return future
+    return wrapper
+
+
+# TODO: this needs a better name
+def future_wrap(f):
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        future = Future()
+        if kwargs.get('callback') is not None:
+            future.add_done_callback(kwargs.pop('callback'))
+        kwargs['callback'] = future.set_result
+        def handle_error(typ, value, tb):
+            future.set_exception(value)
+            return True
+        with ExceptionStackContext(handle_error):
+            f(*args, **kwargs)
+        return future
+    return wrapper
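`run_on_executor` and `future_wrap` bridge the callback style and the new `Future` style: the first runs a blocking method on `self.executor` and schedules the callback on `self.io_loop`, the second turns a callback-taking function into one returning a `Future`. A usage sketch for `run_on_executor` (note that in this early version `kwargs.pop("callback")` has no default, so the callback keyword is effectively required):

```python
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
from tornado.ioloop import IOLoop

class Fetcher(object):
    def __init__(self):
        # run_on_executor expects these two attributes on the instance.
        self.executor = ThreadPoolExecutor(max_workers=4)
        self.io_loop = IOLoop.instance()

    @run_on_executor
    def fetch(self, url):
        # Blocking work runs on the pool, not on the IOLoop thread.
        import urllib2
        return urllib2.urlopen(url).read()

# fetcher.fetch(url, callback=on_done) returns a Future and also runs
# on_done(future) on the IOLoop once the result is ready.
```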
@@ -27,21 +27,23 @@ import time
 
 from tornado import httputil
 from tornado import ioloop
+from tornado.log import gen_log
 from tornado import stack_context
 
 from tornado.escape import utf8
-from tornado.httpclient import HTTPRequest, HTTPResponse, HTTPError, AsyncHTTPClient, main
+from tornado.httpclient import HTTPRequest, HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
 
 
 class CurlAsyncHTTPClient(AsyncHTTPClient):
-    def initialize(self, io_loop=None, max_clients=10,
-                   max_simultaneous_connections=None):
+    def initialize(self, io_loop=None, max_clients=10, defaults=None):
         self.io_loop = io_loop
+        self.defaults = dict(HTTPRequest._DEFAULTS)
+        if defaults is not None:
+            self.defaults.update(defaults)
         self._multi = pycurl.CurlMulti()
         self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
         self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
-        self._curls = [_curl_create(max_simultaneous_connections)
-                       for i in xrange(max_clients)]
+        self._curls = [_curl_create() for i in xrange(max_clients)]
         self._free_list = self._curls[:]
         self._requests = collections.deque()
         self._fds = {}
@@ -53,7 +55,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
             # socket_action is found in pycurl since 7.18.2 (it's been
             # in libcurl longer than that but wasn't accessible to
             # python).
-            logging.warning("socket_action method missing from pycurl; "
+            gen_log.warning("socket_action method missing from pycurl; "
                             "falling back to socket_all. Upgrading "
                             "libcurl and pycurl will improve performance")
             self._socket_action = \
@@ -78,6 +80,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
     def fetch(self, request, callback, **kwargs):
         if not isinstance(request, HTTPRequest):
             request = HTTPRequest(url=request, **kwargs)
+        request = _RequestProxy(request, self.defaults)
         self._requests.append((request, stack_context.wrap(callback)))
         self._process_queue()
         self._set_timeout(0)
@@ -110,7 +113,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
         if self._timeout is not None:
             self.io_loop.remove_timeout(self._timeout)
         self._timeout = self.io_loop.add_timeout(
-            time.time() + msecs / 1000.0, self._handle_timeout)
+            self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
 
     def _handle_events(self, fd, events):
         """Called by IOLoop when there is activity on one of our
@@ -263,12 +266,11 @@ class CurlError(HTTPError):
         self.errno = errno
 
 
-def _curl_create(max_simultaneous_connections=None):
+def _curl_create():
     curl = pycurl.Curl()
-    if logging.getLogger().isEnabledFor(logging.DEBUG):
+    if gen_log.isEnabledFor(logging.DEBUG):
         curl.setopt(pycurl.VERBOSE, 1)
         curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
-    curl.setopt(pycurl.MAXCONNECTS, max_simultaneous_connections or 5)
     return curl
 
 
@@ -389,11 +391,11 @@ def _curl_setup_request(curl, request, buffer, headers):
         userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
         curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
         curl.setopt(pycurl.USERPWD, utf8(userpwd))
-        logging.debug("%s %s (username: %r)", request.method, request.url,
+        gen_log.debug("%s %s (username: %r)", request.method, request.url,
                       request.auth_username)
     else:
         curl.unsetopt(pycurl.USERPWD)
-        logging.debug("%s %s", request.method, request.url)
+        gen_log.debug("%s %s", request.method, request.url)
 
     if request.client_cert is not None:
         curl.setopt(pycurl.SSLCERT, request.client_cert)
@@ -429,12 +431,12 @@ def _curl_header_callback(headers, header_line):
 def _curl_debug(debug_type, debug_msg):
     debug_types = ('I', '<', '>', '<', '>')
     if debug_type == 0:
-        logging.debug('%s', debug_msg.strip())
+        gen_log.debug('%s', debug_msg.strip())
     elif debug_type in (1, 2):
         for line in debug_msg.splitlines():
-            logging.debug('%s %s', debug_types[debug_type], line)
+            gen_log.debug('%s %s', debug_types[debug_type], line)
     elif debug_type == 4:
-        logging.debug('%s %r', debug_types[debug_type], debug_msg)
+        gen_log.debug('%s %r', debug_types[debug_type], debug_msg)
 
 if __name__ == "__main__":
     AsyncHTTPClient.configure(CurlAsyncHTTPClient)
@@ -1,238 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A lightweight wrapper around MySQLdb."""
-
-from __future__ import absolute_import, division, with_statement
-
-import copy
-import itertools
-import logging
-import time
-
-try:
-    import MySQLdb.constants
-    import MySQLdb.converters
-    import MySQLdb.cursors
-except ImportError:
-    # If MySQLdb isn't available this module won't actually be useable,
-    # but we want it to at least be importable (mainly for readthedocs.org,
-    # which has limitations on third-party modules)
-    MySQLdb = None
-
-
-class Connection(object):
-    """A lightweight wrapper around MySQLdb DB-API connections.
-
-    The main value we provide is wrapping rows in a dict/object so that
-    columns can be accessed by name. Typical usage::
-
-        db = database.Connection("localhost", "mydatabase")
-        for article in db.query("SELECT * FROM articles"):
-            print article.title
-
-    Cursors are hidden by the implementation, but other than that, the methods
-    are very similar to the DB-API.
-
-    We explicitly set the timezone to UTC and the character encoding to
-    UTF-8 on all connections to avoid time zone and encoding errors.
-    """
-    def __init__(self, host, database, user=None, password=None,
-                 max_idle_time=7 * 3600):
-        self.host = host
-        self.database = database
-        self.max_idle_time = max_idle_time
-
-        args = dict(conv=CONVERSIONS, use_unicode=True, charset="utf8",
-                    db=database, init_command='SET time_zone = "+0:00"',
-                    sql_mode="TRADITIONAL")
-        if user is not None:
-            args["user"] = user
-        if password is not None:
-            args["passwd"] = password
-
-        # We accept a path to a MySQL socket file or a host(:port) string
-        if "/" in host:
-            args["unix_socket"] = host
-        else:
-            self.socket = None
-            pair = host.split(":")
-            if len(pair) == 2:
-                args["host"] = pair[0]
-                args["port"] = int(pair[1])
-            else:
-                args["host"] = host
-                args["port"] = 3306
-
-        self._db = None
-        self._db_args = args
-        self._last_use_time = time.time()
-        try:
-            self.reconnect()
-        except Exception:
-            logging.error("Cannot connect to MySQL on %s", self.host,
-                          exc_info=True)
-
-    def __del__(self):
-        self.close()
-
-    def close(self):
-        """Closes this database connection."""
-        if getattr(self, "_db", None) is not None:
-            self._db.close()
-            self._db = None
-
-    def reconnect(self):
-        """Closes the existing database connection and re-opens it."""
-        self.close()
-        self._db = MySQLdb.connect(**self._db_args)
-        self._db.autocommit(True)
-
-    def iter(self, query, *parameters):
-        """Returns an iterator for the given query and parameters."""
-        self._ensure_connected()
-        cursor = MySQLdb.cursors.SSCursor(self._db)
-        try:
-            self._execute(cursor, query, parameters)
-            column_names = [d[0] for d in cursor.description]
-            for row in cursor:
-                yield Row(zip(column_names, row))
-        finally:
-            cursor.close()
-
-    def query(self, query, *parameters):
-        """Returns a row list for the given query and parameters."""
-        cursor = self._cursor()
-        try:
-            self._execute(cursor, query, parameters)
-            column_names = [d[0] for d in cursor.description]
-            return [Row(itertools.izip(column_names, row)) for row in cursor]
-        finally:
-            cursor.close()
-
-    def get(self, query, *parameters):
-        """Returns the first row returned for the given query."""
-        rows = self.query(query, *parameters)
-        if not rows:
-            return None
-        elif len(rows) > 1:
-            raise Exception("Multiple rows returned for Database.get() query")
-        else:
-            return rows[0]
-
-    # rowcount is a more reasonable default return value than lastrowid,
-    # but for historical compatibility execute() must return lastrowid.
-    def execute(self, query, *parameters):
-        """Executes the given query, returning the lastrowid from the query."""
-        return self.execute_lastrowid(query, *parameters)
-
-    def execute_lastrowid(self, query, *parameters):
-        """Executes the given query, returning the lastrowid from the query."""
-        cursor = self._cursor()
-        try:
-            self._execute(cursor, query, parameters)
-            return cursor.lastrowid
-        finally:
-            cursor.close()
-
-    def execute_rowcount(self, query, *parameters):
-        """Executes the given query, returning the rowcount from the query."""
-        cursor = self._cursor()
-        try:
-            self._execute(cursor, query, parameters)
-            return cursor.rowcount
-        finally:
-            cursor.close()
-
-    def executemany(self, query, parameters):
-        """Executes the given query against all the given param sequences.
-
-        We return the lastrowid from the query.
-        """
-        return self.executemany_lastrowid(query, parameters)
-
-    def executemany_lastrowid(self, query, parameters):
-        """Executes the given query against all the given param sequences.
-
-        We return the lastrowid from the query.
-        """
-        cursor = self._cursor()
-        try:
-            cursor.executemany(query, parameters)
-            return cursor.lastrowid
-        finally:
-            cursor.close()
-
-    def executemany_rowcount(self, query, parameters):
-        """Executes the given query against all the given param sequences.
-
-        We return the rowcount from the query.
-        """
-        cursor = self._cursor()
-        try:
-            cursor.executemany(query, parameters)
-            return cursor.rowcount
-        finally:
-            cursor.close()
-
-    def _ensure_connected(self):
-        # Mysql by default closes client connections that are idle for
-        # 8 hours, but the client library does not report this fact until
-        # you try to perform a query and it fails. Protect against this
-        # case by preemptively closing and reopening the connection
-        # if it has been idle for too long (7 hours by default).
-        if (self._db is None or
-            (time.time() - self._last_use_time > self.max_idle_time)):
-            self.reconnect()
-        self._last_use_time = time.time()
-
-    def _cursor(self):
-        self._ensure_connected()
-        return self._db.cursor()
-
-    def _execute(self, cursor, query, parameters):
-        try:
-            return cursor.execute(query, parameters)
-        except OperationalError:
-            logging.error("Error connecting to MySQL on %s", self.host)
-            self.close()
-            raise
-
-
-class Row(dict):
-    """A dict that allows for object-like property access syntax."""
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(name)
-
-if MySQLdb is not None:
-    # Fix the access conversions to properly recognize unicode/binary
-    FIELD_TYPE = MySQLdb.constants.FIELD_TYPE
-    FLAG = MySQLdb.constants.FLAG
-    CONVERSIONS = copy.copy(MySQLdb.converters.conversions)
-
-    field_types = [FIELD_TYPE.BLOB, FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING]
-    if 'VARCHAR' in vars(FIELD_TYPE):
-        field_types.append(FIELD_TYPE.VARCHAR)
-
-    for field_type in field_types:
-        CONVERSIONS[field_type] = [(FLAG.BINARY, str)] + CONVERSIONS[field_type]
-
-    # Alias some common MySQL exceptions
-    IntegrityError = MySQLdb.IntegrityError
-    OperationalError = MySQLdb.OperationalError
@@ -69,6 +69,8 @@ import operator
 import sys
 import types
 
+from tornado.concurrent import Future
+from tornado.ioloop import IOLoop
 from tornado.stack_context import ExceptionStackContext
 
 
@@ -247,6 +249,24 @@ class Task(YieldPoint):
         return self.runner.pop_result(self.key)
 
 
+class YieldFuture(YieldPoint):
+    def __init__(self, future, io_loop=None):
+        self.future = future
+        self.io_loop = io_loop or IOLoop.current()
+
+    def start(self, runner):
+        self.runner = runner
+        self.key = object()
+        runner.register_callback(self.key)
+        self.io_loop.add_future(self.future, runner.result_callback(self.key))
+
+    def is_ready(self):
+        return self.runner.is_ready(self.key)
+
+    def get_result(self):
+        return self.runner.pop_result(self.key).result()
+
+
 class Multi(YieldPoint):
     """Runs multiple asynchronous operations in parallel.
 
@@ -354,12 +374,16 @@ class Runner(object):
                             "finished without waiting for callbacks %r" %
                             self.pending_callbacks)
                     self.deactivate_stack_context()
+                    self.deactivate_stack_context = None
                     return
                 except Exception:
                     self.finished = True
                     raise
                 if isinstance(yielded, list):
                     yielded = Multi(yielded)
+                if isinstance(yielded, Future):
+                    # TODO: lists of futures
+                    yielded = YieldFuture(yielded)
                 if isinstance(yielded, YieldPoint):
                     self.yield_point = yielded
                     try:
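Because `Runner` now converts a yielded `Future` into a `YieldFuture`, a `gen.engine` coroutine can yield futures directly alongside the older `Task`/`Callback` yield points. A small sketch using the `dummy_executor` from the new tornado.concurrent module (run inside a started IOLoop so the generator can resume; `IOLoop.add_future` is assumed available, as `run_on_executor` above already relies on it):

```python
from tornado import gen
from tornado.concurrent import dummy_executor

@gen.engine
def compute(callback):
    # submit() returns an already-resolved Future; yielding it hands it to
    # YieldFuture, which resumes the generator with future.result().
    result = yield dummy_executor.submit(pow, 2, 10)
    callback(result)  # -> 1024
```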
@@ -38,9 +38,9 @@ import time
 import weakref
 
 from tornado.escape import utf8
-from tornado import httputil
+from tornado import httputil, stack_context
 from tornado.ioloop import IOLoop
-from tornado.util import import_object, bytes_type
+from tornado.util import Configurable
 
 
 class HTTPClient(object):
@@ -95,7 +95,7 @@ class HTTPClient(object):
         return response
 
 
-class AsyncHTTPClient(object):
+class AsyncHTTPClient(Configurable):
     """An non-blocking HTTP client.
 
     Example usage::
@@ -121,46 +121,31 @@ class AsyncHTTPClient(object):
     are deprecated. The implementation subclass as well as arguments to
     its constructor can be set with the static method configure()
     """
-    _impl_class = None
-    _impl_kwargs = None
+    @classmethod
+    def configurable_base(cls):
+        return AsyncHTTPClient
 
-    _DEFAULT_MAX_CLIENTS = 10
+    @classmethod
+    def configurable_default(cls):
+        from tornado.simple_httpclient import SimpleAsyncHTTPClient
+        return SimpleAsyncHTTPClient
 
     @classmethod
     def _async_clients(cls):
-        assert cls is not AsyncHTTPClient, "should only be called on subclasses"
-        if not hasattr(cls, '_async_client_dict'):
-            cls._async_client_dict = weakref.WeakKeyDictionary()
-        return cls._async_client_dict
+        attr_name = '_async_client_dict_' + cls.__name__
+        if not hasattr(cls, attr_name):
+            setattr(cls, attr_name, weakref.WeakKeyDictionary())
+        return getattr(cls, attr_name)
 
-    def __new__(cls, io_loop=None, max_clients=None, force_instance=False,
-                **kwargs):
+    def __new__(cls, io_loop=None, force_instance=False, **kwargs):
         io_loop = io_loop or IOLoop.instance()
-        if cls is AsyncHTTPClient:
-            if cls._impl_class is None:
-                from tornado.simple_httpclient import SimpleAsyncHTTPClient
-                AsyncHTTPClient._impl_class = SimpleAsyncHTTPClient
-            impl = AsyncHTTPClient._impl_class
-        else:
-            impl = cls
-        if io_loop in impl._async_clients() and not force_instance:
-            return impl._async_clients()[io_loop]
-        else:
-            instance = super(AsyncHTTPClient, cls).__new__(impl)
-            args = {}
-            if cls._impl_kwargs:
-                args.update(cls._impl_kwargs)
-            args.update(kwargs)
-            if max_clients is not None:
-                # max_clients is special because it may be passed
-                # positionally instead of by keyword
-                args["max_clients"] = max_clients
-            elif "max_clients" not in args:
-                args["max_clients"] = AsyncHTTPClient._DEFAULT_MAX_CLIENTS
-            instance.initialize(io_loop, **args)
-            if not force_instance:
-                impl._async_clients()[io_loop] = instance
-            return instance
+        if io_loop in cls._async_clients() and not force_instance:
+            return cls._async_clients()[io_loop]
+        instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
+                                                       **kwargs)
+        if not force_instance:
+            cls._async_clients()[io_loop] = instance
+        return instance
 
     def close(self):
         """Destroys this http client, freeing any file descriptors used.
@@ -185,8 +170,8 @@ class AsyncHTTPClient(object):
         """
         raise NotImplementedError()
 
-    @staticmethod
-    def configure(impl, **kwargs):
+    @classmethod
+    def configure(cls, impl, **kwargs):
         """Configures the AsyncHTTPClient subclass to use.
 
         AsyncHTTPClient() actually creates an instance of a subclass.
@@ -205,38 +190,38 @@ class AsyncHTTPClient(object):
|
||||
|
||||
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
|
||||
"""
|
||||
if isinstance(impl, (unicode, bytes_type)):
|
||||
impl = import_object(impl)
|
||||
if impl is not None and not issubclass(impl, AsyncHTTPClient):
|
||||
raise ValueError("Invalid AsyncHTTPClient implementation")
|
||||
AsyncHTTPClient._impl_class = impl
|
||||
AsyncHTTPClient._impl_kwargs = kwargs
|
||||
|
||||
@staticmethod
|
||||
def _save_configuration():
|
||||
return (AsyncHTTPClient._impl_class, AsyncHTTPClient._impl_kwargs)
|
||||
|
||||
@staticmethod
|
||||
def _restore_configuration(saved):
|
||||
AsyncHTTPClient._impl_class = saved[0]
|
||||
AsyncHTTPClient._impl_kwargs = saved[1]
|
||||
super(AsyncHTTPClient, cls).configure(impl, **kwargs)
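
A minimal usage sketch of the configure() hook, not part of the diff: constructor keyword arguments passed to configure() become defaults for every instance (pycurl must be installed for the curl implementation; the URL is a placeholder).

from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop

# kwargs given here are merged into each instance's constructor arguments.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient",
                          max_clients=50)

def handle_response(response):
    print response.code
    IOLoop.instance().stop()

AsyncHTTPClient().fetch("http://www.example.com/", handle_response)
IOLoop.instance().start()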


class HTTPRequest(object):
    """HTTP client request object."""

    # Default values for HTTPRequest parameters.
    # Merged with the values on the request object by AsyncHTTPClient
    # implementations.
    _DEFAULTS = dict(
        connect_timeout=20.0,
        request_timeout=20.0,
        follow_redirects=True,
        max_redirects=5,
        use_gzip=True,
        proxy_password='',
        allow_nonstandard_methods=False,
        validate_cert=True)

    def __init__(self, url, method="GET", headers=None, body=None,
                 auth_username=None, auth_password=None,
                 connect_timeout=20.0, request_timeout=20.0,
                 if_modified_since=None, follow_redirects=True,
                 max_redirects=5, user_agent=None, use_gzip=True,
                 connect_timeout=None, request_timeout=None,
                 if_modified_since=None, follow_redirects=None,
                 max_redirects=None, user_agent=None, use_gzip=None,
                 network_interface=None, streaming_callback=None,
                 header_callback=None, prepare_curl_callback=None,
                 proxy_host=None, proxy_port=None, proxy_username=None,
                 proxy_password='', allow_nonstandard_methods=False,
                 validate_cert=True, ca_certs=None,
                 proxy_password=None, allow_nonstandard_methods=None,
                 validate_cert=None, ca_certs=None,
                 allow_ipv6=None,
                 client_key=None, client_cert=None):
        """Creates an `HTTPRequest`.
        r"""Creates an `HTTPRequest`.

        All parameters except `url` are optional.

@@ -261,8 +246,13 @@ class HTTPRequest(object):
           `~HTTPResponse.body` and `~HTTPResponse.buffer` will be empty in
           the final response.
        :arg callable header_callback: If set, `header_callback` will
           be run with each header line as it is received, and
           `~HTTPResponse.headers` will be empty in the final response.
           be run with each header line as it is received (including the
           first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
           containing only ``\r\n``. All lines include the trailing newline
           characters). `~HTTPResponse.headers` will be empty in the final
           response. This is most useful in conjunction with
           `streaming_callback`, because it's the only way to get access to
           header data while the request is in progress.
        :arg callable prepare_curl_callback: If set, will be called with
           a `pycurl.Curl` object to allow the application to make additional
           `setopt` calls.
@@ -310,9 +300,9 @@ class HTTPRequest(object):
        self.user_agent = user_agent
        self.use_gzip = use_gzip
        self.network_interface = network_interface
        self.streaming_callback = streaming_callback
        self.header_callback = header_callback
        self.prepare_curl_callback = prepare_curl_callback
        self.streaming_callback = stack_context.wrap(streaming_callback)
        self.header_callback = stack_context.wrap(header_callback)
        self.prepare_curl_callback = stack_context.wrap(prepare_curl_callback)
        self.allow_nonstandard_methods = allow_nonstandard_methods
        self.validate_cert = validate_cert
        self.ca_certs = ca_certs
@@ -331,11 +321,15 @@ class HTTPResponse(object):

    * code: numeric HTTP status code, e.g. 200 or 404

    * reason: human-readable reason phrase describing the status code
        (with curl_httpclient, this is a default value rather than the
        server's actual response)

    * headers: httputil.HTTPHeaders object

    * buffer: cStringIO object for response body

    * body: respose body as string (created on demand from self.buffer)
    * body: response body as string (created on demand from self.buffer)

    * error: Exception object, if any

@@ -349,9 +343,10 @@ class HTTPResponse(object):
    """
    def __init__(self, request, code, headers=None, buffer=None,
                 effective_url=None, error=None, request_time=None,
                 time_info=None):
                 time_info=None, reason=None):
        self.request = request
        self.code = code
        self.reason = reason or httplib.responses.get(code, "Unknown")
        if headers is not None:
            self.headers = headers
        else:
@@ -412,6 +407,24 @@ class HTTPError(Exception):
        self.response = response
        Exception.__init__(self, "HTTP %d: %s" % (self.code, message))

class _RequestProxy(object):
    """Combines an object with a dictionary of defaults.

    Used internally by AsyncHTTPClient implementations.
    """
    def __init__(self, request, defaults):
        self.request = request
        self.defaults = defaults

    def __getattr__(self, name):
        request_attr = getattr(self.request, name)
        if request_attr is not None:
            return request_attr
        elif self.defaults is not None:
            return self.defaults.get(name, None)
        else:
            return None
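
An illustrative sketch (not part of the diff) of the resolution order in _RequestProxy: attributes set on the request win, anything left as None falls back to the defaults dictionary. The URL is a placeholder value.

# Illustrative only; HTTPRequest._DEFAULTS above is the real defaults source.
request = HTTPRequest("http://www.example.com/")    # connect_timeout left as None
proxy = _RequestProxy(request, HTTPRequest._DEFAULTS)
assert proxy.url == "http://www.example.com/"       # set on the request itself
assert proxy.connect_timeout == 20.0                # filled in from the defaults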


def main():
    from tornado.options import define, options, parse_command_line

@@ -27,13 +27,13 @@ This module also defines the `HTTPRequest` class which is exposed via
from __future__ import absolute_import, division, with_statement

import Cookie
import logging
import socket
import time

from tornado.escape import native_str, parse_qs_bytes
from tornado import httputil
from tornado import iostream
from tornado.log import gen_log
from tornado.netutil import TCPServer
from tornado import stack_context
from tornado.util import b, bytes_type
@@ -67,21 +67,30 @@ class HTTPServer(TCPServer):
    http_server.listen(8888)
    ioloop.IOLoop.instance().start()

    `HTTPServer` is a very basic connection handler. Beyond parsing the
    HTTP request body and headers, the only HTTP semantics implemented
    in `HTTPServer` is HTTP/1.1 keep-alive connections. We do not, however,
    implement chunked encoding, so the request callback must provide a
    ``Content-Length`` header or implement chunked encoding for HTTP/1.1
    requests for the server to run correctly for HTTP/1.1 clients. If
    the request handler is unable to do this, you can provide the
    ``no_keep_alive`` argument to the `HTTPServer` constructor, which will
    ensure the connection is closed on every request no matter what HTTP
    version the client is using.
    `HTTPServer` is a very basic connection handler. It parses the request
    headers and body, but the request callback is responsible for producing
    the response exactly as it will appear on the wire. This affords
    maximum flexibility for applications to implement whatever parts
    of HTTP responses are required.

    If ``xheaders`` is ``True``, we support the ``X-Real-Ip`` and ``X-Scheme``
    headers, which override the remote IP and HTTP scheme for all requests.
    These headers are useful when running Tornado behind a reverse proxy or
    load balancer.
    `HTTPServer` supports keep-alive connections by default
    (automatically for HTTP/1.1, or for HTTP/1.0 when the client
    requests ``Connection: keep-alive``). This means that the request
    callback must generate a properly-framed response, using either
    the ``Content-Length`` header or ``Transfer-Encoding: chunked``.
    Applications that are unable to frame their responses properly
    should instead return a ``Connection: close`` header in each
    response and pass ``no_keep_alive=True`` to the `HTTPServer`
    constructor.
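
A minimal request callback that satisfies the framing requirement described in the new docstring: the response carries an explicit Content-Length, so keep-alive clients can reuse the connection. This sketch is not part of the diff.

# Keep-alive-safe callback: the body is framed with Content-Length.
def handle_request(request):
    body = "Hello, world\n"
    request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s"
                  % (len(body), body))
    request.finish()

server = HTTPServer(handle_request)
server.listen(8888)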

    If ``xheaders`` is ``True``, we support the
    ``X-Real-Ip``/``X-Forwarded-For`` and
    ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
    remote IP and URI scheme/protocol for all requests. These headers
    are useful when running Tornado behind a reverse proxy or load
    balancer. The ``protocol`` argument can also be set to ``https``
    if Tornado is run behind an SSL-decoding proxy that does not set one of
    the supported ``xheaders``.

    `HTTPServer` can serve SSL traffic with Python 2.6+ and OpenSSL.
    To make this server serve SSL traffic, send the ssl_options dictionary
@@ -134,16 +143,17 @@ class HTTPServer(TCPServer):

    """
    def __init__(self, request_callback, no_keep_alive=False, io_loop=None,
                 xheaders=False, ssl_options=None, **kwargs):
                 xheaders=False, ssl_options=None, protocol=None, **kwargs):
        self.request_callback = request_callback
        self.no_keep_alive = no_keep_alive
        self.xheaders = xheaders
        self.protocol = protocol
        TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
                           **kwargs)

    def handle_stream(self, stream, address):
        HTTPConnection(stream, address, self.request_callback,
                       self.no_keep_alive, self.xheaders)
                       self.no_keep_alive, self.xheaders, self.protocol)


class _BadRequestException(Exception):
@@ -158,12 +168,13 @@ class HTTPConnection(object):
    until the HTTP connection is closed.
    """
    def __init__(self, stream, address, request_callback, no_keep_alive=False,
                 xheaders=False):
                 xheaders=False, protocol=None):
        self.stream = stream
        self.address = address
        self.request_callback = request_callback
        self.no_keep_alive = no_keep_alive
        self.xheaders = xheaders
        self.protocol = protocol
        self._request = None
        self._request_finished = False
        # Save stack context here, outside of any request. This keeps
@@ -172,6 +183,12 @@ class HTTPConnection(object):
        self.stream.read_until(b("\r\n\r\n"), self._header_callback)
        self._write_callback = None

    def close(self):
        self.stream.close()
        # Remove this reference to self, which would otherwise cause a
        # cycle and delay garbage collection of this connection.
        self._header_callback = None

    def write(self, chunk, callback=None):
        """Writes a chunk of output to the stream."""
        assert self._request, "Request closed"
@@ -218,9 +235,15 @@ class HTTPConnection(object):
            self._request = None
            self._request_finished = False
            if disconnect:
                self.stream.close()
                self.close()
                return
            self.stream.read_until(b("\r\n\r\n"), self._header_callback)
            try:
                # Use a try/except instead of checking stream.closed()
                # directly, because in some cases the stream doesn't discover
                # that it's closed until you try to read from it.
                self.stream.read_until(b("\r\n\r\n"), self._header_callback)
            except iostream.StreamClosedError:
                self.close()

    def _on_headers(self, data):
        try:
@@ -247,7 +270,7 @@ class HTTPConnection(object):

            self._request = HTTPRequest(
                connection=self, method=method, uri=uri, version=version,
                headers=headers, remote_ip=remote_ip)
                headers=headers, remote_ip=remote_ip, protocol=self.protocol)

            content_length = headers.get("Content-Length")
            if content_length:
@@ -261,9 +284,9 @@ class HTTPConnection(object):

            self.request_callback(self._request)
        except _BadRequestException, e:
            logging.info("Malformed HTTP request from %s: %s",
            gen_log.info("Malformed HTTP request from %s: %s",
                         self.address[0], e)
            self.stream.close()
            self.close()
            return

    def _on_request_body(self, data):
@@ -382,12 +405,7 @@ class HTTPRequest(object):
        self._finish_time = None

        self.path, sep, self.query = uri.partition('?')
        arguments = parse_qs_bytes(self.query)
        self.arguments = {}
        for name, values in arguments.iteritems():
            values = [v for v in values if v]
            if values:
                self.arguments[name] = values
        self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)

    def supports_http_1_1(self):
        """Returns True if this request supports HTTP/1.1 semantics"""
@@ -427,7 +445,7 @@ class HTTPRequest(object):
        else:
            return self._finish_time - self._start_time

    def get_ssl_certificate(self):
    def get_ssl_certificate(self, binary_form=False):
        """Returns the client's SSL certificate, if any.

        To use client certificates, the HTTPServer must have been constructed
@@ -440,12 +458,16 @@ class HTTPRequest(object):
                       cert_reqs=ssl.CERT_REQUIRED,
                       ca_certs="cacert.crt"))

        The return value is a dictionary, see SSLSocket.getpeercert() in
        the standard library for more details.
        By default, the return value is a dictionary (or None, if no
        client certificate is present). If ``binary_form`` is true, a
        DER-encoded form of the certificate is returned instead. See
        SSLSocket.getpeercert() in the standard library for more
        details.
        http://docs.python.org/library/ssl.html#sslsocket-objects
        """
        try:
            return self.connection.stream.socket.getpeercert()
            return self.connection.stream.socket.getpeercert(
                binary_form=binary_form)
        except ssl.SSLError:
            return None
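
A sketch of the new ``binary_form`` flag in use (not part of the diff; assumes an SSL-enabled server configured with cert_reqs as in the docstring above). The standard-library ssl module can convert the DER bytes back to PEM:

import ssl

def handle_request(request):
    der_cert = request.get_ssl_certificate(binary_form=True)
    if der_cert is not None:
        # DER -> PEM using the stdlib helper available since Python 2.6.
        pem_cert = ssl.DER_cert_to_PEM_cert(der_cert)
        print pem_cert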


@@ -18,11 +18,11 @@

from __future__ import absolute_import, division, with_statement

import logging
import urllib
import re

from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
from tornado.util import b, ObjectDict


@@ -207,6 +207,13 @@ class HTTPFile(ObjectDict):


def parse_body_arguments(content_type, body, arguments, files):
    """Parses a form request body.

    Supports "application/x-www-form-urlencoded" and "multipart/form-data".
    The content_type parameter should be a string and body should be
    a byte string. The arguments and files parameters are dictionaries
    that will be updated with the parsed contents.
    """
    if content_type.startswith("application/x-www-form-urlencoded"):
        uri_arguments = parse_qs_bytes(native_str(body))
        for name, values in uri_arguments.iteritems():
@@ -221,7 +228,7 @@ def parse_body_arguments(content_type, body, arguments, files):
                parse_multipart_form_data(utf8(v), body, arguments, files)
                break
        else:
            logging.warning("Invalid multipart/form-data")
            gen_log.warning("Invalid multipart/form-data")


def parse_multipart_form_data(boundary, data, arguments, files):
@@ -240,7 +247,7 @@ def parse_multipart_form_data(boundary, data, arguments, files):
        boundary = boundary[1:-1]
    final_boundary_index = data.rfind(b("--") + boundary + b("--"))
    if final_boundary_index == -1:
        logging.warning("Invalid multipart/form-data: no final boundary")
        gen_log.warning("Invalid multipart/form-data: no final boundary")
        return
    parts = data[:final_boundary_index].split(b("--") + boundary + b("\r\n"))
    for part in parts:
@@ -248,17 +255,17 @@ def parse_multipart_form_data(boundary, data, arguments, files):
            continue
        eoh = part.find(b("\r\n\r\n"))
        if eoh == -1:
            logging.warning("multipart/form-data missing headers")
            gen_log.warning("multipart/form-data missing headers")
            continue
        headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
        disp_header = headers.get("Content-Disposition", "")
        disposition, disp_params = _parse_header(disp_header)
        if disposition != "form-data" or not part.endswith(b("\r\n")):
            logging.warning("Invalid multipart/form-data")
            gen_log.warning("Invalid multipart/form-data")
            continue
        value = part[eoh + 4:-2]
        if not disp_params.get("name"):
            logging.warning("multipart/form-data value missing name")
            gen_log.warning("multipart/form-data value missing name")
            continue
        name = disp_params["name"]
        if disp_params.get("filename"):

@@ -30,26 +30,36 @@ from __future__ import absolute_import, division, with_statement

import datetime
import errno
import functools
import heapq
import os
import logging
import os
import select
import sys
import thread
import threading
import time
import traceback

from tornado.concurrent import DummyFuture
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable

try:
    import signal
except ImportError:
    signal = None

try:
    from concurrent import futures
except ImportError:
    futures = None

from tornado.platform.auto import set_close_exec, Waker


class IOLoop(object):
class IOLoop(Configurable):
    """A level-triggered I/O loop.

    We use epoll (Linux) or kqueue (BSD and Mac OS X; requires python
@@ -107,26 +117,7 @@ class IOLoop(object):
    # Global lock for creating global IOLoop instance
    _instance_lock = threading.Lock()

    def __init__(self, impl=None):
        self._impl = impl or _poll()
        if hasattr(self._impl, 'fileno'):
            set_close_exec(self._impl.fileno())
        self._handlers = {}
        self._events = {}
        self._callbacks = []
        self._callback_lock = threading.Lock()
        self._timeouts = []
        self._running = False
        self._stopped = False
        self._thread_ident = None
        self._blocking_signal_threshold = None

        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle
        self._waker = Waker()
        self.add_handler(self._waker.fileno(),
                         lambda fd, events: self._waker.consume(),
                         self.READ)
    _current = threading.local()

    @staticmethod
    def instance():
@@ -166,6 +157,43 @@ class IOLoop(object):
        assert not IOLoop.initialized()
        IOLoop._instance = self

    @staticmethod
    def current():
        current = getattr(IOLoop._current, "instance", None)
        if current is None:
            raise ValueError("no current IOLoop")
        return current

    def make_current(self):
        IOLoop._current.instance = self

    def clear_current(self):
        assert IOLoop._current.instance is self
        IOLoop._current.instance = None

    @classmethod
    def configurable_base(cls):
        return IOLoop

    @classmethod
    def configurable_default(cls):
        if hasattr(select, "epoll") or sys.platform.startswith('linux'):
            try:
                from tornado.platform.epoll import EPollIOLoop
                return EPollIOLoop
            except ImportError:
                gen_log.warning("unable to import EPollIOLoop, falling back to SelectIOLoop")
                pass
        if hasattr(select, "kqueue"):
            # Python 2.6+ on BSD or Mac
            from tornado.platform.kqueue import KQueueIOLoop
            return KQueueIOLoop
        from tornado.platform.select import SelectIOLoop
        return SelectIOLoop
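
With IOLoop now a Configurable, an implementation can be pinned the same way AsyncHTTPClient.configure pins a subclass. A sketch, assuming the Configurable machinery accepts a dotted-path string here as it does for AsyncHTTPClient:

from tornado.ioloop import IOLoop

# Force the select()-based loop regardless of platform, e.g. to
# reproduce platform-specific behavior in tests (assumption: string
# arguments are resolved via import_object, as for AsyncHTTPClient).
IOLoop.configure('tornado.platform.select.SelectIOLoop')
io_loop = IOLoop()  # instantiates SelectIOLoop via configurable_default override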

    def initialize(self):
        pass

    def close(self, all_fds=False):
        """Closes the IOLoop, freeing any resources used.

@@ -185,33 +213,19 @@ class IOLoop(object):
        Therefore the call to `close` will usually appear just after
        the call to `start` rather than near the call to `stop`.
        """
        self.remove_handler(self._waker.fileno())
        if all_fds:
            for fd in self._handlers.keys()[:]:
                try:
                    os.close(fd)
                except Exception:
                    logging.debug("error closing fd %s", fd, exc_info=True)
        self._waker.close()
        self._impl.close()
        raise NotImplementedError()

    def add_handler(self, fd, handler, events):
        """Registers the given handler to receive the given events for fd."""
        self._handlers[fd] = stack_context.wrap(handler)
        self._impl.register(fd, events | self.ERROR)
        raise NotImplementedError()

    def update_handler(self, fd, events):
        """Changes the events we listen for fd."""
        self._impl.modify(fd, events | self.ERROR)
        raise NotImplementedError()

    def remove_handler(self, fd):
        """Stop listening for events on fd."""
        self._handlers.pop(fd, None)
        self._events.pop(fd, None)
        try:
            self._impl.unregister(fd)
        except (OSError, IOError):
            logging.debug("Error deleting fd from IOLoop", exc_info=True)
        raise NotImplementedError()

    def set_blocking_signal_threshold(self, seconds, action):
        """Sends a signal if the ioloop is blocked for more than s seconds.
@@ -224,14 +238,7 @@ class IOLoop(object):
        If action is None, the process will be killed if it is blocked for
        too long.
        """
        if not hasattr(signal, "setitimer"):
            logging.error("set_blocking_signal_threshold requires a signal module "
                          "with the setitimer method")
            return
        self._blocking_signal_threshold = seconds
        if seconds is not None:
            signal.signal(signal.SIGALRM,
                          action if action is not None else signal.SIG_DFL)
        raise NotImplementedError()

    def set_blocking_log_threshold(self, seconds):
        """Logs a stack trace if the ioloop is blocked for more than s seconds.
@@ -244,9 +251,9 @@ class IOLoop(object):

        For use with set_blocking_signal_threshold.
        """
        logging.warning('IOLoop blocked for %f seconds in\n%s',
                        self._blocking_signal_threshold,
                        ''.join(traceback.format_stack(frame)))
        gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                        self._blocking_signal_threshold,
                        ''.join(traceback.format_stack(frame)))

    def start(self):
        """Starts the I/O loop.
@@ -254,11 +261,249 @@ class IOLoop(object):
        The loop will run until one of the I/O handlers calls stop(), which
        will make the loop stop after the current event iteration completes.
        """
        raise NotImplementedError()

    def stop(self):
        """Stop the loop after the current event loop iteration is complete.
        If the event loop is not currently running, the next call to start()
        will return immediately.

        To use asynchronous methods from otherwise-synchronous code (such as
        unit tests), you can start and stop the event loop like this::

          ioloop = IOLoop()
          async_method(ioloop=ioloop, callback=ioloop.stop)
          ioloop.start()

        ioloop.start() will return after async_method has run its callback,
        whether that callback was invoked before or after ioloop.start.

        Note that even after `stop` has been called, the IOLoop is not
        completely stopped until `IOLoop.start` has also returned.
        """
        raise NotImplementedError()

    def time(self):
        """Returns the current time according to the IOLoop's clock.

        The return value is a floating-point number relative to an
        unspecified time in the past.

        By default, the IOLoop's time function is `time.time`. However,
        it may be configured to use e.g. `time.monotonic` instead.
        Calls to `add_timeout` that pass a number instead of a
        `datetime.timedelta` should use this function to compute the
        appropriate time, so they can work no matter what time function
        is chosen.
        """
        return time.time()

    def add_timeout(self, deadline, callback):
        """Calls the given callback at the time deadline from the I/O loop.

        Returns a handle that may be passed to remove_timeout to cancel.

        ``deadline`` may be a number denoting a time relative to
        `IOLoop.time`, or a ``datetime.timedelta`` object for a
        deadline relative to the current time.

        Note that it is not safe to call `add_timeout` from other threads.
        Instead, you must use `add_callback` to transfer control to the
        IOLoop's thread, and then call `add_timeout` from there.
        """
        raise NotImplementedError()
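
The two deadline forms accepted above, as a short sketch (not part of the diff). Numbers are on the IOLoop.time() scale; timedeltas are relative to now:

import datetime

io_loop = IOLoop.instance()

def on_timeout():
    print "fired"

# Absolute deadline, computed via the loop's own clock.
handle = io_loop.add_timeout(io_loop.time() + 5, on_timeout)
# Relative deadline; equivalent regardless of the configured time function.
io_loop.add_timeout(datetime.timedelta(seconds=5), on_timeout)
io_loop.remove_timeout(handle)  # cancels only the first timeout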

    def remove_timeout(self, timeout):
        """Cancels a pending timeout.

        The argument is a handle as returned by add_timeout.
        """
        raise NotImplementedError()

    def add_callback(self, callback):
        """Calls the given callback on the next I/O loop iteration.

        It is safe to call this method from any thread at any time,
        except from a signal handler. Note that this is the *only*
        method in IOLoop that makes this thread-safety guarantee; all
        other interaction with the IOLoop must be done from that
        IOLoop's thread. add_callback() may be used to transfer
        control from other threads to the IOLoop's thread.

        To add a callback from a signal handler, see
        `add_callback_from_signal`.
        """
        raise NotImplementedError()

    def add_callback_from_signal(self, callback):
        """Calls the given callback on the next I/O loop iteration.

        Safe for use from a Python signal handler; should not be used
        otherwise.

        Callbacks added with this method will be run without any
        stack_context, to avoid picking up the context of the function
        that was interrupted by the signal.
        """
        raise NotImplementedError()

    if futures is not None:
        _FUTURE_TYPES = (futures.Future, DummyFuture)
    else:
        _FUTURE_TYPES = DummyFuture

    def add_future(self, future, callback):
        """Schedules a callback on the IOLoop when the given future is finished.

        The callback is invoked with one argument, the future.
        """
        assert isinstance(future, IOLoop._FUTURE_TYPES)
        callback = stack_context.wrap(callback)
        future.add_done_callback(
            lambda future: self.add_callback(
                functools.partial(callback, future)))
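
A sketch of add_future bridging a worker-thread result back onto the IOLoop thread (not part of the diff; assumes the concurrent.futures backport imported at the top of the module is available):

from concurrent import futures

executor = futures.ThreadPoolExecutor(max_workers=2)
io_loop = IOLoop.instance()

def on_done(future):
    # Runs on the IOLoop thread once the worker finishes.
    print future.result()
    io_loop.stop()

io_loop.add_future(executor.submit(lambda: 6 * 7), on_done)
io_loop.start()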

    def _run_callback(self, callback):
        """Runs a callback with error handling.

        For use in subclasses.
        """
        try:
            callback()
        except Exception:
            self.handle_callback_exception(callback)

    def handle_callback_exception(self, callback):
        """This method is called whenever a callback run by the IOLoop
        throws an exception.

        By default simply logs the exception as an error. Subclasses
        may override this method to customize reporting of exceptions.

        The exception itself is not passed explicitly, but is available
        in sys.exc_info.
        """
        app_log.error("Exception in callback %r", callback, exc_info=True)


class PollIOLoop(IOLoop):
    """Base class for IOLoops built around a select-like function.

    For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
    (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
    `tornado.platform.select.SelectIOLoop` (all platforms).
    """
    def initialize(self, impl, time_func=None):
        super(PollIOLoop, self).initialize()
        self._impl = impl
        if hasattr(self._impl, 'fileno'):
            set_close_exec(self._impl.fileno())
        self.time_func = time_func or time.time
        self._handlers = {}
        self._events = {}
        self._callbacks = []
        self._callback_lock = threading.Lock()
        self._timeouts = []
        self._running = False
        self._stopped = False
        self._closing = False
        self._thread_ident = None
        self._blocking_signal_threshold = None

        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle
        self._waker = Waker()
        self.add_handler(self._waker.fileno(),
                         lambda fd, events: self._waker.consume(),
                         self.READ)

    def close(self, all_fds=False):
        with self._callback_lock:
            self._closing = True
        self.remove_handler(self._waker.fileno())
        if all_fds:
            for fd in self._handlers.keys()[:]:
                try:
                    os.close(fd)
                except Exception:
                    gen_log.debug("error closing fd %s", fd, exc_info=True)
        self._waker.close()
        self._impl.close()

    def add_handler(self, fd, handler, events):
        self._handlers[fd] = stack_context.wrap(handler)
        self._impl.register(fd, events | self.ERROR)

    def update_handler(self, fd, events):
        self._impl.modify(fd, events | self.ERROR)

    def remove_handler(self, fd):
        self._handlers.pop(fd, None)
        self._events.pop(fd, None)
        try:
            self._impl.unregister(fd)
        except (OSError, IOError):
            gen_log.debug("Error deleting fd from IOLoop", exc_info=True)

    def set_blocking_signal_threshold(self, seconds, action):
        if not hasattr(signal, "setitimer"):
            gen_log.error("set_blocking_signal_threshold requires a signal module "
                          "with the setitimer method")
            return
        self._blocking_signal_threshold = seconds
        if seconds is not None:
            signal.signal(signal.SIGALRM,
                          action if action is not None else signal.SIG_DFL)

    def start(self):
        if not logging.getLogger().handlers:
            # The IOLoop catches and logs exceptions, so it's
            # important that log output be visible. However, python's
            # default behavior for non-root loggers (prior to python
            # 3.2) is to print an unhelpful "no handlers could be
            # found" message rather than the actual log entry, so we
            # must explicitly configure logging if we've made it this
            # far without anything.
            logging.basicConfig()
        if self._stopped:
            self._stopped = False
            return
        old_current = getattr(IOLoop._current, "instance", None)
        IOLoop._current.instance = self
        self._thread_ident = thread.get_ident()
        self._running = True

        # signal.set_wakeup_fd closes a race condition in event loops:
        # a signal may arrive at the beginning of select/poll/etc
        # before it goes into its interruptible sleep, so the signal
        # will be consumed without waking the select. The solution is
        # for the (C, synchronous) signal handler to write to a pipe,
        # which will then be seen by select.
        #
        # In python's signal handling semantics, this only matters on the
        # main thread (fortunately, set_wakeup_fd only works on the main
        # thread and will raise a ValueError otherwise).
        #
        # If someone has already set a wakeup fd, we don't want to
        # disturb it. This is an issue for twisted, which does its
        # SIGCHILD processing in response to its own wakeup fd being
        # written to. As long as the wakeup fd is registered on the IOLoop,
        # the loop will still wake up and everything should work.
        old_wakeup_fd = None
        if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
            # requires python 2.6+, unix. set_wakeup_fd exists but crashes
            # the python process on windows.
            try:
                old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
                if old_wakeup_fd != -1:
                    # Already set, restore previous value. This is a little racy,
                    # but there's no clean get_wakeup_fd and in real use the
                    # IOLoop is just started once at the beginning.
                    signal.set_wakeup_fd(old_wakeup_fd)
                    old_wakeup_fd = None
            except ValueError:  # non-main thread
                pass

        while True:
            poll_timeout = 3600.0

@@ -271,7 +516,7 @@ class IOLoop(object):
                self._run_callback(callback)

            if self._timeouts:
                now = time.time()
                now = self.time()
                while self._timeouts:
                    if self._timeouts[0].callback is None:
                        # the timeout was cancelled
@@ -330,64 +575,33 @@ class IOLoop(object):
                        # Happens when the client closes the connection
                        pass
                    else:
                        logging.error("Exception in I/O handler for fd %s",
                        app_log.error("Exception in I/O handler for fd %s",
                                      fd, exc_info=True)
                except Exception:
                    logging.error("Exception in I/O handler for fd %s",
                    app_log.error("Exception in I/O handler for fd %s",
                                  fd, exc_info=True)
        # reset the stopped flag so another start/stop pair can be issued
        self._stopped = False
        if self._blocking_signal_threshold is not None:
            signal.setitimer(signal.ITIMER_REAL, 0, 0)
        IOLoop._current.instance = old_current
        if old_wakeup_fd is not None:
            signal.set_wakeup_fd(old_wakeup_fd)

    def stop(self):
        """Stop the loop after the current event loop iteration is complete.
        If the event loop is not currently running, the next call to start()
        will return immediately.

        To use asynchronous methods from otherwise-synchronous code (such as
        unit tests), you can start and stop the event loop like this::

          ioloop = IOLoop()
          async_method(ioloop=ioloop, callback=ioloop.stop)
          ioloop.start()

        ioloop.start() will return after async_method has run its callback,
        whether that callback was invoked before or after ioloop.start.

        Note that even after `stop` has been called, the IOLoop is not
        completely stopped until `IOLoop.start` has also returned.
        """
        self._running = False
        self._stopped = True
        self._waker.wake()

    def running(self):
        """Returns true if this IOLoop is currently running."""
        return self._running
    def time(self):
        return self.time_func()

    def add_timeout(self, deadline, callback):
        """Calls the given callback at the time deadline from the I/O loop.

        Returns a handle that may be passed to remove_timeout to cancel.

        ``deadline`` may be a number denoting a unix timestamp (as returned
        by ``time.time()`` or a ``datetime.timedelta`` object for a deadline
        relative to the current time.

        Note that it is not safe to call `add_timeout` from other threads.
        Instead, you must use `add_callback` to transfer control to the
        IOLoop's thread, and then call `add_timeout` from there.
        """
        timeout = _Timeout(deadline, stack_context.wrap(callback))
        timeout = _Timeout(deadline, stack_context.wrap(callback), self)
        heapq.heappush(self._timeouts, timeout)
        return timeout

    def remove_timeout(self, timeout):
        """Cancels a pending timeout.

        The argument is a handle as returned by add_timeout.
        """
        # Removing from a heap is complicated, so just leave the defunct
        # timeout object in the queue (see discussion in
        # http://docs.python.org/library/heapq.html).
@@ -396,15 +610,9 @@ class IOLoop(object):
        timeout.callback = None

    def add_callback(self, callback):
        """Calls the given callback on the next I/O loop iteration.

        It is safe to call this method from any thread at any time.
        Note that this is the *only* method in IOLoop that makes this
        guarantee; all other interaction with the IOLoop must be done
        from that IOLoop's thread. add_callback() may be used to transfer
        control from other threads to the IOLoop's thread.
        """
        with self._callback_lock:
            if self._closing:
                raise RuntimeError("IOLoop is closing")
            list_empty = not self._callbacks
            self._callbacks.append(stack_context.wrap(callback))
            if list_empty and thread.get_ident() != self._thread_ident:
@@ -416,23 +624,22 @@ class IOLoop(object):
                # avoid it when we can.
                self._waker.wake()

    def _run_callback(self, callback):
        try:
            callback()
        except Exception:
            self.handle_callback_exception(callback)

    def handle_callback_exception(self, callback):
        """This method is called whenever a callback run by the IOLoop
        throws an exception.

        By default simply logs the exception as an error. Subclasses
        may override this method to customize reporting of exceptions.

        The exception itself is not passed explicitly, but is available
        in sys.exc_info.
        """
        logging.error("Exception in callback %r", callback, exc_info=True)
    def add_callback_from_signal(self, callback):
        with stack_context.NullContext():
            if thread.get_ident() != self._thread_ident:
                # if the signal is handled on another thread, we can add
                # it normally (modulo the NullContext)
                self.add_callback(callback)
            else:
                # If we're on the IOLoop's thread, we cannot use
                # the regular add_callback because it may deadlock on
                # _callback_lock. Blindly insert into self._callbacks.
                # This is safe because the GIL makes list.append atomic.
                # One subtlety is that if the signal interrupted the
                # _callback_lock block in IOLoop.start, we may modify
                # either the old or new version of self._callbacks,
                # but either way will work.
                self._callbacks.append(stack_context.wrap(callback))


class _Timeout(object):
@@ -441,11 +648,11 @@ class _Timeout(object):
    # Reduce memory overhead when there are lots of pending callbacks
    __slots__ = ['deadline', 'callback']

    def __init__(self, deadline, callback):
    def __init__(self, deadline, callback, io_loop):
        if isinstance(deadline, (int, long, float)):
            self.deadline = deadline
        elif isinstance(deadline, datetime.timedelta):
            self.deadline = time.time() + _Timeout.timedelta_to_seconds(deadline)
            self.deadline = io_loop.time() + _Timeout.timedelta_to_seconds(deadline)
        else:
            raise TypeError("Unsupported deadline %r" % deadline)
        self.callback = callback
@@ -477,6 +684,8 @@ class PeriodicCallback(object):
    """
    def __init__(self, callback, callback_time, io_loop=None):
        self.callback = callback
        if callback_time <= 0:
            raise ValueError("Periodic callback must have a positive callback_time")
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.instance()
        self._running = False
@@ -485,7 +694,7 @@ class PeriodicCallback(object):
    def start(self):
        """Starts the timer."""
        self._running = True
        self._next_timeout = time.time()
        self._next_timeout = self.io_loop.time()
        self._schedule_next()

    def stop(self):
@@ -501,172 +710,12 @@ class PeriodicCallback(object):
        try:
            self.callback()
        except Exception:
            logging.error("Error in periodic callback", exc_info=True)
            app_log.error("Error in periodic callback", exc_info=True)
        self._schedule_next()

    def _schedule_next(self):
        if self._running:
            current_time = time.time()
            current_time = self.io_loop.time()
            while self._next_timeout <= current_time:
                self._next_timeout += self.callback_time / 1000.0
            self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
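
A short usage sketch (not part of the diff): callback_time is in milliseconds, and with this change scheduling follows the loop's own clock via io_loop.time().

def report():
    print "still alive"

pc = PeriodicCallback(report, 30000)  # callback_time in milliseconds: every 30s
pc.start()
IOLoop.instance().start()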


class _EPoll(object):
    """An epoll-based event loop using our C module for Python 2.5 systems"""
    _EPOLL_CTL_ADD = 1
    _EPOLL_CTL_DEL = 2
    _EPOLL_CTL_MOD = 3

    def __init__(self):
        self._epoll_fd = epoll.epoll_create()

    def fileno(self):
        return self._epoll_fd

    def close(self):
        os.close(self._epoll_fd)

    def register(self, fd, events):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_ADD, fd, events)

    def modify(self, fd, events):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_MOD, fd, events)

    def unregister(self, fd):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_DEL, fd, 0)

    def poll(self, timeout):
        return epoll.epoll_wait(self._epoll_fd, int(timeout * 1000))


class _KQueue(object):
    """A kqueue-based event loop for BSD/Mac systems."""
    def __init__(self):
        self._kqueue = select.kqueue()
        self._active = {}

    def fileno(self):
        return self._kqueue.fileno()

    def close(self):
        self._kqueue.close()

    def register(self, fd, events):
        if fd in self._active:
            raise IOError("fd %d already registered" % fd)
        self._control(fd, events, select.KQ_EV_ADD)
        self._active[fd] = events

    def modify(self, fd, events):
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        events = self._active.pop(fd)
        self._control(fd, events, select.KQ_EV_DELETE)

    def _control(self, fd, events, flags):
        kevents = []
        if events & IOLoop.WRITE:
            kevents.append(select.kevent(
                fd, filter=select.KQ_FILTER_WRITE, flags=flags))
        if events & IOLoop.READ or not kevents:
            # Always read when there is not a write
            kevents.append(select.kevent(
                fd, filter=select.KQ_FILTER_READ, flags=flags))
        # Even though control() takes a list, it seems to return EINVAL
        # on Mac OS X (10.6) when there is more than one event in the list.
        for kevent in kevents:
            self._kqueue.control([kevent], 0)

    def poll(self, timeout):
        kevents = self._kqueue.control(None, 1000, timeout)
        events = {}
        for kevent in kevents:
            fd = kevent.ident
            if kevent.filter == select.KQ_FILTER_READ:
                events[fd] = events.get(fd, 0) | IOLoop.READ
            if kevent.filter == select.KQ_FILTER_WRITE:
                if kevent.flags & select.KQ_EV_EOF:
                    # If an asynchronous connection is refused, kqueue
                    # returns a write event with the EOF flag set.
                    # Turn this into an error for consistency with the
                    # other IOLoop implementations.
                    # Note that for read events, EOF may be returned before
                    # all data has been consumed from the socket buffer,
                    # so we only check for EOF on write events.
                    events[fd] = IOLoop.ERROR
                else:
                    events[fd] = events.get(fd, 0) | IOLoop.WRITE
            if kevent.flags & select.KQ_EV_ERROR:
                events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()


class _Select(object):
    """A simple, select()-based IOLoop implementation for non-Linux systems"""
    def __init__(self):
        self.read_fds = set()
        self.write_fds = set()
        self.error_fds = set()
        self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)

    def close(self):
        pass

    def register(self, fd, events):
        if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds:
            raise IOError("fd %d already registered" % fd)
        if events & IOLoop.READ:
            self.read_fds.add(fd)
        if events & IOLoop.WRITE:
            self.write_fds.add(fd)
        if events & IOLoop.ERROR:
            self.error_fds.add(fd)
            # Closed connections are reported as errors by epoll and kqueue,
            # but as zero-byte reads by select, so when errors are requested
            # we need to listen for both read and error.
            self.read_fds.add(fd)

    def modify(self, fd, events):
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        self.read_fds.discard(fd)
        self.write_fds.discard(fd)
        self.error_fds.discard(fd)

    def poll(self, timeout):
        readable, writeable, errors = select.select(
            self.read_fds, self.write_fds, self.error_fds, timeout)
        events = {}
        for fd in readable:
            events[fd] = events.get(fd, 0) | IOLoop.READ
        for fd in writeable:
            events[fd] = events.get(fd, 0) | IOLoop.WRITE
        for fd in errors:
            events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()


# Choose a poll implementation. Use epoll if it is available, fall back to
# select() for non-Linux platforms
if hasattr(select, "epoll"):
    # Python 2.6+ on Linux
    _poll = select.epoll
elif hasattr(select, "kqueue"):
    # Python 2.6+ on BSD or Mac
    _poll = _KQueue
else:
    try:
        # Linux systems with our C module installed
        from tornado import epoll
        _poll = _EPoll
    except Exception:
        # All other systems
        import sys
        if "linux" in sys.platform:
            logging.warning("epoll module not found; using select()")
        _poll = _Select

@@ -14,19 +14,27 @@
# License for the specific language governing permissions and limitations
# under the License.

"""A utility class to write to and read from a non-blocking socket."""
"""Utility classes to write to and read from non-blocking files and sockets.

Contents:

* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""

from __future__ import absolute_import, division, with_statement

import collections
import errno
import logging
import os
import socket
import sys
import re

from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado import stack_context
from tornado.util import b, bytes_type

@@ -35,56 +43,29 @@ try:
except ImportError:
    ssl = None

try:
    from tornado.platform.posix import _set_nonblocking
except ImportError:
    _set_nonblocking = None

class IOStream(object):
    r"""A utility class to write to and read from a non-blocking socket.
class StreamClosedError(IOError):
    pass

class BaseIOStream(object):
    """A utility class to write to and read from a non-blocking file or socket.

    We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
    All of the methods take callbacks (since writing and reading are
    non-blocking and asynchronous).

    The socket parameter may either be connected or unconnected. For
    server operations the socket is the result of calling socket.accept().
    For client operations the socket is created with socket.socket(),
    and may either be connected before passing it to the IOStream or
    connected with IOStream.connect.

    When a stream is closed due to an error, the IOStream's `error`
    attribute contains the exception object.

    A very simple (and broken) HTTP client using this class::

        from tornado import ioloop
        from tornado import iostream
        import socket

        def send_request():
            stream.write("GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
            stream.read_until("\r\n\r\n", on_headers)

        def on_headers(data):
            headers = {}
            for line in data.split("\r\n"):
                parts = line.split(":")
                if len(parts) == 2:
                    headers[parts[0].strip()] = parts[1].strip()
            stream.read_bytes(int(headers["Content-Length"]), on_body)

        def on_body(data):
            print data
            stream.close()
            ioloop.IOLoop.instance().stop()

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        stream = iostream.IOStream(s)
        stream.connect(("friendfeed.com", 80), send_request)
        ioloop.IOLoop.instance().start()

    Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
    `read_from_fd`, and optionally `get_fd_error`.
    """
    def __init__(self, socket, io_loop=None, max_buffer_size=104857600,
    def __init__(self, io_loop=None, max_buffer_size=104857600,
                 read_chunk_size=4096):
        self.socket = socket
        self.socket.setblocking(False)
        self.io_loop = io_loop or ioloop.IOLoop.instance()
        self.max_buffer_size = max_buffer_size
        self.read_chunk_size = read_chunk_size
@@ -105,40 +86,45 @@ class IOStream(object):
        self._connecting = False
        self._state = None
        self._pending_callbacks = 0
        self._closed = False

    def connect(self, address, callback=None):
        """Connects the socket to a remote address without blocking.
    def fileno(self):
        """Returns the file descriptor for this stream."""
        raise NotImplementedError()

        May only be called if the socket passed to the constructor was
        not previously connected. The address parameter is in the
        same format as for socket.connect, i.e. a (host, port) tuple.
        If callback is specified, it will be called when the
        connection is completed.
    def close_fd(self):
        """Closes the file underlying this stream.

        Note that it is safe to call IOStream.write while the
        connection is pending, in which case the data will be written
        as soon as the connection is ready. Calling IOStream read
        methods before the socket is connected works on some platforms
        but is non-portable.
        ``close_fd`` is called by `BaseIOStream` and should not be called
        elsewhere; other users should call `close` instead.
        """
        self._connecting = True
        try:
            self.socket.connect(address)
        except socket.error, e:
            # In non-blocking mode we expect connect() to raise an
            # exception with EINPROGRESS or EWOULDBLOCK.
            #
            # On freebsd, other errors such as ECONNREFUSED may be
            # returned immediately when attempting to connect to
            # localhost, so handle them the same way as an error
            # reported later in _handle_connect.
            if e.args[0] not in (errno.EINPROGRESS, errno.EWOULDBLOCK):
                logging.warning("Connect error on fd %d: %s",
                                self.socket.fileno(), e)
                self.close()
                return
        self._connect_callback = stack_context.wrap(callback)
        self._add_io_state(self.io_loop.WRITE)
        raise NotImplementedError()

    def write_to_fd(self, data):
        """Attempts to write ``data`` to the underlying file.

        Returns the number of bytes written.
        """
        raise NotImplementedError()

    def read_from_fd(self):
        """Attempts to read from the underlying file.

        Returns ``None`` if there was nothing to read (the socket returned
        EWOULDBLOCK or equivalent), otherwise returns the data. When possible,
        should return no more than ``self.read_chunk_size`` bytes at a time.
        """
        raise NotImplementedError()
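
A rough sketch of the subclass contract above, approximating what the socket-based implementation provides (class name is hypothetical; error handling trimmed for brevity):

class _SketchSocketStream(BaseIOStream):
    def __init__(self, sock, *args, **kwargs):
        self.socket = sock
        self.socket.setblocking(False)
        super(_SketchSocketStream, self).__init__(*args, **kwargs)

    def fileno(self):
        return self.socket.fileno()

    def close_fd(self):
        self.socket.close()
        self.socket = None

    def write_to_fd(self, data):
        # send() returns the number of bytes written, as required.
        return self.socket.send(data)

    def read_from_fd(self):
        try:
            chunk = self.socket.recv(self.read_chunk_size)
        except socket.error, e:
            if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                return None  # nothing to read right now
            raise
        if not chunk:
            self.close()  # remote end closed the connection
            return None
        return chunk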

    def get_fd_error(self):
        """Returns information about any error on the underlying file.

        This method is called after the IOLoop has signaled an error on the
        file descriptor, and should return an Exception (such as `socket.error`
        with additional information), or None if no such information is
        available.
        """
        return None

    def read_until_regex(self, regex, callback):
        """Call callback when we read the given regex pattern."""
@@ -176,8 +162,14 @@ class IOStream(object):
        a ``streaming_callback`` is not used.
        """
        self._set_read_callback(callback)
        self._streaming_callback = stack_context.wrap(streaming_callback)
        if self.closed():
            self._run_callback(callback, self._consume(self._read_buffer_size))
            if self._streaming_callback is not None:
                self._run_callback(self._streaming_callback,
                                   self._consume(self._read_buffer_size))
            self._run_callback(self._read_callback,
                               self._consume(self._read_buffer_size))
            self._streaming_callback = None
            self._read_callback = None
            return
        self._read_until_close = True
@@ -207,10 +199,11 @@ class IOStream(object):
        else:
            self._write_buffer.append(data)
        self._write_callback = stack_context.wrap(callback)
        self._handle_write()
        if self._write_buffer:
            self._add_io_state(self.io_loop.WRITE)
        self._maybe_add_error_listener()
        if not self._connecting:
            self._handle_write()
            if self._write_buffer:
                self._add_io_state(self.io_loop.WRITE)
            self._maybe_add_error_listener()

    def set_close_callback(self, callback):
        """Call the given callback when the stream is closed."""
@@ -218,7 +211,7 @@ class IOStream(object):

    def close(self):
        """Close this stream."""
        if self.socket is not None:
        if not self.closed():
            if any(sys.exc_info()):
                self.error = sys.exc_info()[1]
            if self._read_until_close:
@@ -228,14 +221,14 @@ class IOStream(object):
                self._run_callback(callback,
                                   self._consume(self._read_buffer_size))
            if self._state is not None:
                self.io_loop.remove_handler(self.socket.fileno())
                self.io_loop.remove_handler(self.fileno())
                self._state = None
            self.socket.close()
            self.socket = None
            self.close_fd()
            self._closed = True
        self._maybe_run_close_callback()

    def _maybe_run_close_callback(self):
        if (self.socket is None and self._close_callback and
        if (self.closed() and self._close_callback and
                self._pending_callbacks == 0):
            # if there are pending callbacks, don't run the close callback
            # until they're done (see _maybe_add_error_handler)
@@ -253,27 +246,25 @@ class IOStream(object):

    def closed(self):
        """Returns true if the stream has been closed."""
        return self.socket is None
        return self._closed

    def _handle_events(self, fd, events):
        if not self.socket:
            logging.warning("Got events for closed stream %d", fd)
        if self.closed():
            gen_log.warning("Got events for closed stream %d", fd)
            return
        try:
            if events & self.io_loop.READ:
                self._handle_read()
            if not self.socket:
            if self.closed():
                return
            if events & self.io_loop.WRITE:
                if self._connecting:
                    self._handle_connect()
                self._handle_write()
            if not self.socket:
            if self.closed():
                return
            if events & self.io_loop.ERROR:
                errno = self.socket.getsockopt(socket.SOL_SOCKET,
                                               socket.SO_ERROR)
                self.error = socket.error(errno, os.strerror(errno))
                self.error = self.get_fd_error()
                # We may have queued up a user callback in _handle_read or
                # _handle_write, so don't close the IOStream until those
                # callbacks have had a chance to run.
@@ -290,9 +281,9 @@ class IOStream(object):
            assert self._state is not None, \
                "shouldn't happen: _handle_events without self._state"
            self._state = state
            self.io_loop.update_handler(self.socket.fileno(), self._state)
            self.io_loop.update_handler(self.fileno(), self._state)
        except Exception:
            logging.error("Uncaught exception, closing connection.",
            gen_log.error("Uncaught exception, closing connection.",
                          exc_info=True)
            self.close()
            raise
@@ -303,7 +294,7 @@ class IOStream(object):
        try:
            callback(*args)
        except Exception:
            logging.error("Uncaught exception, closing connection.",
            app_log.error("Uncaught exception, closing connection.",
                          exc_info=True)
            # Close the socket on an uncaught exception from a user callback
            # (It would eventually get closed when the socket object is
@@ -345,7 +336,7 @@ class IOStream(object):
            # clause below (which calls `close` and does need to
            # trigger the callback)
            self._pending_callbacks += 1
            while True:
            while not self.closed():
                # Read from the socket until we get EWOULDBLOCK or equivalent.
                # SSL sockets do some internal buffering, and if the data is
                # sitting in the SSL object's buffer select() and friends
@@ -356,7 +347,7 @@ class IOStream(object):
        finally:
            self._pending_callbacks -= 1
        except Exception:
            logging.warning("error on read", exc_info=True)
            gen_log.warning("error on read", exc_info=True)
            self.close()
            return
        if self._read_from_buffer():
@@ -382,33 +373,14 @@ class IOStream(object):
        try:
            # See comments in _handle_read about incrementing _pending_callbacks
            self._pending_callbacks += 1
            while True:
            while not self.closed():
                if self._read_to_buffer() == 0:
                    break
            self._check_closed()
        finally:
            self._pending_callbacks -= 1
        if self._read_from_buffer():
            return
        self._add_io_state(self.io_loop.READ)

    def _read_from_socket(self):
        """Attempts to read from the socket.

        Returns the data read or None if there is nothing to read.
        May be overridden in subclasses.
        """
        try:
            chunk = self.socket.recv(self.read_chunk_size)
        except socket.error, e:
            if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                return None
            else:
                raise
        if not chunk:
            self.close()
            return None
        return chunk
        self._maybe_add_error_listener()

    def _read_to_buffer(self):
        """Reads from the socket and appends the result to the read buffer.
@@ -418,11 +390,15 @@ class IOStream(object):
        error closes the socket and raises an exception.
        """
        try:
            chunk = self._read_from_socket()
        except socket.error, e:
            chunk = self.read_from_fd()
        except (socket.error, IOError, OSError), e:
            # ssl.SSLError is a subclass of socket.error
            logging.warning("Read error on %d: %s",
                            self.socket.fileno(), e)
            if e.args[0] == errno.ECONNRESET:
                # Treat ECONNRESET as a connection close rather than
                # an error to minimize log spam (the exception will
                # be available on self.error for apps that care).
                self.close()
                return
            self.close()
            raise
        if chunk is None:
@@ -430,7 +406,7 @@ class IOStream(object):
            self._read_buffer.append(chunk)
            self._read_buffer_size += len(chunk)
        if self._read_buffer_size >= self.max_buffer_size:
            logging.error("Reached maximum read buffer size")
            gen_log.error("Reached maximum read buffer size")
            self.close()
            raise IOError("Reached maximum read buffer size")
        return len(chunk)
@@ -495,24 +471,6 @@ class IOStream(object):
            _double_prefix(self._read_buffer)
        return False

    def _handle_connect(self):
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            self.error = socket.error(err, os.strerror(err))
            # IOLoop implementations may vary: some of them return
            # an error state before the socket becomes writable, so
|
||||
# in that case a connection failure would be handled by the
|
||||
# error path in _handle_events instead of here.
|
||||
logging.warning("Connect error on fd %d: %s",
|
||||
self.socket.fileno(), errno.errorcode[err])
|
||||
self.close()
|
||||
return
|
||||
if self._connect_callback is not None:
|
||||
callback = self._connect_callback
|
||||
self._connect_callback = None
|
||||
self._run_callback(callback)
|
||||
self._connecting = False
|
||||
|
||||
def _handle_write(self):
|
||||
while self._write_buffer:
|
||||
try:
|
||||
@@ -523,7 +481,7 @@ class IOStream(object):
|
||||
# process. Therefore we must not call socket.send
|
||||
# with more than 128KB at a time.
|
||||
_merge_prefix(self._write_buffer, 128 * 1024)
|
||||
num_bytes = self.socket.send(self._write_buffer[0])
|
||||
num_bytes = self.write_to_fd(self._write_buffer[0])
|
||||
if num_bytes == 0:
|
||||
# With OpenSSL, if we couldn't write the entire buffer,
|
||||
# the very same string object must be used on the
|
||||
@@ -543,8 +501,8 @@ class IOStream(object):
|
||||
self._write_buffer_frozen = True
|
||||
break
|
||||
else:
|
||||
logging.warning("Write error on %d: %s",
|
||||
self.socket.fileno(), e)
|
||||
gen_log.warning("Write error on %d: %s",
|
||||
self.fileno(), e)
|
||||
self.close()
|
||||
return
|
||||
if not self._write_buffer and self._write_callback:
|
||||
@@ -560,12 +518,12 @@ class IOStream(object):
|
||||
return self._read_buffer.popleft()
|
||||
|
||||
def _check_closed(self):
|
||||
if not self.socket:
|
||||
raise IOError("Stream is closed")
|
||||
if self.closed():
|
||||
raise StreamClosedError("Stream is closed")
|
||||
|
||||
def _maybe_add_error_listener(self):
|
||||
if self._state is None and self._pending_callbacks == 0:
|
||||
if self.socket is None:
|
||||
if self.closed():
|
||||
self._maybe_run_close_callback()
|
||||
else:
|
||||
self._add_io_state(ioloop.IOLoop.READ)
|
||||
@@ -591,17 +549,143 @@ class IOStream(object):
|
||||
(since the write callback is optional so we can have a
|
||||
fast-path write with no `_run_callback`)
|
||||
"""
|
||||
if self.socket is None:
|
||||
if self.closed():
|
||||
# connection has been closed, so there can be no future events
|
||||
return
|
||||
if self._state is None:
|
||||
self._state = ioloop.IOLoop.ERROR | state
|
||||
with stack_context.NullContext():
|
||||
self.io_loop.add_handler(
|
||||
self.socket.fileno(), self._handle_events, self._state)
|
||||
self.fileno(), self._handle_events, self._state)
|
||||
elif not self._state & state:
|
||||
self._state = self._state | state
|
||||
self.io_loop.update_handler(self.socket.fileno(), self._state)
|
||||
self.io_loop.update_handler(self.fileno(), self._state)
|
||||
|
||||
|
||||
class IOStream(BaseIOStream):
|
||||
r"""Socket-based IOStream implementation.
|
||||
|
||||
This class supports the read and write methods from `BaseIOStream`
|
||||
plus a `connect` method.
|
||||
|
||||
The socket parameter may either be connected or unconnected. For
|
||||
server operations the socket is the result of calling socket.accept().
|
||||
For client operations the socket is created with socket.socket(),
|
||||
and may either be connected before passing it to the IOStream or
|
||||
connected with IOStream.connect.
|
||||
|
||||
A very simple (and broken) HTTP client using this class::
|
||||
|
||||
from tornado import ioloop
|
||||
from tornado import iostream
|
||||
import socket
|
||||
|
||||
def send_request():
|
||||
stream.write("GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
|
||||
stream.read_until("\r\n\r\n", on_headers)
|
||||
|
||||
def on_headers(data):
|
||||
headers = {}
|
||||
for line in data.split("\r\n"):
|
||||
parts = line.split(":")
|
||||
if len(parts) == 2:
|
||||
headers[parts[0].strip()] = parts[1].strip()
|
||||
stream.read_bytes(int(headers["Content-Length"]), on_body)
|
||||
|
||||
def on_body(data):
|
||||
print data
|
||||
stream.close()
|
||||
ioloop.IOLoop.instance().stop()
|
||||
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
|
||||
stream = iostream.IOStream(s)
|
||||
stream.connect(("friendfeed.com", 80), send_request)
|
||||
ioloop.IOLoop.instance().start()
|
||||
"""
|
||||
def __init__(self, socket, *args, **kwargs):
|
||||
self.socket = socket
|
||||
self.socket.setblocking(False)
|
||||
super(IOStream, self).__init__(*args, **kwargs)
|
||||
|
||||
def fileno(self):
|
||||
return self.socket.fileno()
|
||||
|
||||
def close_fd(self):
|
||||
self.socket.close()
|
||||
self.socket = None
|
||||
|
||||
def get_fd_error(self):
|
||||
errno = self.socket.getsockopt(socket.SOL_SOCKET,
|
||||
socket.SO_ERROR)
|
||||
return socket.error(errno, os.strerror(errno))
|
||||
|
||||
def read_from_fd(self):
|
||||
try:
|
||||
chunk = self.socket.recv(self.read_chunk_size)
|
||||
except socket.error, e:
|
||||
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
|
||||
return None
|
||||
else:
|
||||
raise
|
||||
if not chunk:
|
||||
self.close()
|
||||
return None
|
||||
return chunk
|
||||
|
||||
def write_to_fd(self, data):
|
||||
return self.socket.send(data)
|
||||
|
||||
def connect(self, address, callback=None):
|
||||
"""Connects the socket to a remote address without blocking.
|
||||
|
||||
May only be called if the socket passed to the constructor was
|
||||
not previously connected. The address parameter is in the
|
||||
same format as for socket.connect, i.e. a (host, port) tuple.
|
||||
If callback is specified, it will be called when the
|
||||
connection is completed.
|
||||
|
||||
Note that it is safe to call IOStream.write while the
|
||||
connection is pending, in which case the data will be written
|
||||
as soon as the connection is ready. Calling IOStream read
|
||||
methods before the socket is connected works on some platforms
|
||||
but is non-portable.
|
||||
"""
|
||||
self._connecting = True
|
||||
try:
|
||||
self.socket.connect(address)
|
||||
except socket.error, e:
|
||||
# In non-blocking mode we expect connect() to raise an
|
||||
# exception with EINPROGRESS or EWOULDBLOCK.
|
||||
#
|
||||
# On freebsd, other errors such as ECONNREFUSED may be
|
||||
# returned immediately when attempting to connect to
|
||||
# localhost, so handle them the same way as an error
|
||||
# reported later in _handle_connect.
|
||||
if e.args[0] not in (errno.EINPROGRESS, errno.EWOULDBLOCK):
|
||||
gen_log.warning("Connect error on fd %d: %s",
|
||||
self.socket.fileno(), e)
|
||||
self.close()
|
||||
return
|
||||
self._connect_callback = stack_context.wrap(callback)
|
||||
self._add_io_state(self.io_loop.WRITE)
|
||||
|
||||
def _handle_connect(self):
|
||||
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
|
||||
if err != 0:
|
||||
self.error = socket.error(err, os.strerror(err))
|
||||
# IOLoop implementations may vary: some of them return
|
||||
# an error state before the socket becomes writable, so
|
||||
# in that case a connection failure would be handled by the
|
||||
# error path in _handle_events instead of here.
|
||||
gen_log.warning("Connect error on fd %d: %s",
|
||||
self.socket.fileno(), errno.errorcode[err])
|
||||
self.close()
|
||||
return
|
||||
if self._connect_callback is not None:
|
||||
callback = self._connect_callback
|
||||
self._connect_callback = None
|
||||
self._run_callback(callback)
|
||||
self._connecting = False
|
||||
|
||||
|
||||
class SSLIOStream(IOStream):
|
||||
@@ -626,6 +710,7 @@ class SSLIOStream(IOStream):
|
||||
self._ssl_accepting = True
|
||||
self._handshake_reading = False
|
||||
self._handshake_writing = False
|
||||
self._ssl_connect_callback = None
|
||||
|
||||
def reading(self):
|
||||
return self._handshake_reading or super(SSLIOStream, self).reading()
|
||||
@@ -650,7 +735,12 @@ class SSLIOStream(IOStream):
|
||||
ssl.SSL_ERROR_ZERO_RETURN):
|
||||
return self.close()
|
||||
elif err.args[0] == ssl.SSL_ERROR_SSL:
|
||||
logging.warning("SSL Error on %d: %s", self.socket.fileno(), err)
|
||||
try:
|
||||
peer = self.socket.getpeername()
|
||||
except:
|
||||
peer = '(not connected)'
|
||||
gen_log.warning("SSL Error on %d %s: %s",
|
||||
self.socket.fileno(), peer, err)
|
||||
return self.close()
|
||||
raise
|
||||
except socket.error, err:
|
||||
@@ -658,7 +748,10 @@ class SSLIOStream(IOStream):
|
||||
return self.close()
|
||||
else:
|
||||
self._ssl_accepting = False
|
||||
super(SSLIOStream, self)._handle_connect()
|
||||
if self._ssl_connect_callback is not None:
|
||||
callback = self._ssl_connect_callback
|
||||
self._ssl_connect_callback = None
|
||||
self._run_callback(callback)
|
||||
|
||||
def _handle_read(self):
|
||||
if self._ssl_accepting:
|
||||
@@ -672,16 +765,25 @@ class SSLIOStream(IOStream):
|
||||
return
|
||||
super(SSLIOStream, self)._handle_write()
|
||||
|
||||
def connect(self, address, callback=None):
|
||||
# Save the user's callback and run it after the ssl handshake
|
||||
# has completed.
|
||||
self._ssl_connect_callback = callback
|
||||
super(SSLIOStream, self).connect(address, callback=None)
|
||||
|
||||
def _handle_connect(self):
|
||||
# When the connection is complete, wrap the socket for SSL
|
||||
# traffic. Note that we do this by overriding _handle_connect
|
||||
# instead of by passing a callback to super().connect because
|
||||
# user callbacks are enqueued asynchronously on the IOLoop,
|
||||
# but since _handle_events calls _handle_connect immediately
|
||||
# followed by _handle_write we need this to be synchronous.
|
||||
self.socket = ssl.wrap_socket(self.socket,
|
||||
do_handshake_on_connect=False,
|
||||
**self._ssl_options)
|
||||
# Don't call the superclass's _handle_connect (which is responsible
|
||||
# for telling the application that the connection is complete)
|
||||
# until we've completed the SSL handshake (so certificates are
|
||||
# available, etc).
|
||||
super(SSLIOStream, self)._handle_connect()
|
||||
|
||||
def _read_from_socket(self):
|
||||
def read_from_fd(self):
|
||||
if self._ssl_accepting:
|
||||
# If the handshake hasn't finished yet, there can't be anything
|
||||
# to read (attempting to read may or may not raise an exception
|
||||
@@ -711,6 +813,44 @@ class SSLIOStream(IOStream):
|
||||
return None
|
||||
return chunk
|
||||
|
||||
class PipeIOStream(BaseIOStream):
|
||||
"""Pipe-based IOStream implementation.
|
||||
|
||||
The constructor takes an integer file descriptor (such as one returned
|
||||
by `os.pipe`) rather than an open file object.
|
||||
"""
|
||||
def __init__(self, fd, *args, **kwargs):
|
||||
self.fd = fd
|
||||
_set_nonblocking(fd)
|
||||
super(PipeIOStream, self).__init__(*args, **kwargs)
|
||||
|
||||
def fileno(self):
|
||||
return self.fd
|
||||
|
||||
def close_fd(self):
|
||||
os.close(self.fd)
|
||||
|
||||
def write_to_fd(self, data):
|
||||
return os.write(self.fd, data)
|
||||
|
||||
def read_from_fd(self):
|
||||
try:
|
||||
chunk = os.read(self.fd, self.read_chunk_size)
|
||||
except (IOError, OSError), e:
|
||||
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
|
||||
return None
|
||||
elif e.args[0] == errno.EBADF:
|
||||
# If the writing half of a pipe is closed, select will
|
||||
# report it as readable but reads will fail with EBADF.
|
||||
self.close()
|
||||
return None
|
||||
else:
|
||||
raise
|
||||
if not chunk:
|
||||
self.close()
|
||||
return None
|
||||
return chunk
|
||||
|
||||
|
||||
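The new `PipeIOStream` above is most naturally driven with a descriptor pair from `os.pipe`. The following is a minimal sketch (not part of the patch) of how it might be used, assuming the usual singleton IOLoop::

    import os
    from tornado import ioloop, iostream

    r, w = os.pipe()
    stream = iostream.PipeIOStream(r)

    def on_line(data):
        # data includes the trailing newline
        print data,
        ioloop.IOLoop.instance().stop()

    stream.read_until("\n", on_line)
    os.write(w, "hello\n")
    ioloop.IOLoop.instance().start()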

def _double_prefix(deque):
    """Grow by doubling, but don't split the second chunk just because the
@@ -43,11 +43,11 @@ from __future__ import absolute_import, division, with_statement

import csv
import datetime
import logging
import os
import re

from tornado import escape
from tornado.log import gen_log

_default_locale = "en_US"
_translations = {}
@@ -118,7 +118,7 @@ def load_translations(directory):
            continue
        locale, extension = path.split(".")
        if not re.match("[a-z]+(_[A-Z]+)?$", locale):
            logging.error("Unrecognized locale %r (path: %s)", locale,
            gen_log.error("Unrecognized locale %r (path: %s)", locale,
                          os.path.join(directory, path))
            continue
        full_path = os.path.join(directory, path)
@@ -142,13 +142,13 @@ def load_translations(directory):
            else:
                plural = "unknown"
            if plural not in ("plural", "singular", "unknown"):
                logging.error("Unrecognized plural indicator %r in %s line %d",
                gen_log.error("Unrecognized plural indicator %r in %s line %d",
                              plural, path, i + 1)
                continue
            _translations[locale].setdefault(plural, {})[english] = translation
        f.close()
    _supported_locales = frozenset(_translations.keys() + [_default_locale])
    logging.debug("Supported locales: %s", sorted(_supported_locales))
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))


def load_gettext_translations(directory, domain):
@@ -184,11 +184,11 @@ def load_gettext_translations(directory, domain):
            _translations[lang] = gettext.translation(domain, directory,
                                                      languages=[lang])
        except Exception, e:
            logging.error("Cannot load translation for '%s': %s", lang, str(e))
            gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
            continue
    _supported_locales = frozenset(_translations.keys() + [_default_locale])
    _use_gettext = True
    logging.debug("Supported locales: %s", sorted(_supported_locales))
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))


def get_supported_locales():
194  libs/tornado/log.py  Executable file
@@ -0,0 +1,194 @@
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Logging support for Tornado.

Tornado uses three logger streams:

* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
  potentially other servers in the future)
* ``tornado.application``: Logging of errors from application code (i.e.
  uncaught exceptions from callbacks)
* ``tornado.general``: General-purpose logging, including any errors
  or warnings from Tornado itself.

These streams may be configured independently using the standard library's
`logging` module. For example, you may wish to send ``tornado.access`` logs
to a separate file for analysis.
"""
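As a quick illustration of the docstring above (a sketch using only the standard library, not code from this patch, and an example path), routing the access log to its own file while leaving the other two streams alone looks like::

    import logging
    from tornado.log import access_log

    handler = logging.FileHandler("access.log")  # example path
    access_log.addHandler(handler)
    access_log.propagate = False  # keep access lines off the root logger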
from __future__ import absolute_import, division, with_statement

import logging
import sys
import time

from tornado.escape import _unicode

try:
    import curses
except ImportError:
    curses = None

# Logger objects for internal tornado use
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")


def _stderr_supports_color():
    color = False
    if curses and sys.stderr.isatty():
        try:
            curses.setupterm()
            if curses.tigetnum("colors") > 0:
                color = True
        except Exception:
            pass
    return color


class LogFormatter(logging.Formatter):
    """Log formatter used in Tornado.

    Key features of this formatter are:

    * Color support when logging to a terminal that supports it.
    * Timestamps on every log line.
    * Robust against str/bytes encoding problems.

    This formatter is enabled automatically by
    `tornado.options.parse_command_line` (unless ``--logging=none`` is
    used).
    """
    def __init__(self, color=True, *args, **kwargs):
        logging.Formatter.__init__(self, *args, **kwargs)
        self._color = color and _stderr_supports_color()
        if self._color:
            # The curses module has some str/bytes confusion in
            # python3. Until version 3.2.3, most methods return
            # bytes, but only accept strings. In addition, we want to
            # output these strings with the logging module, which
            # works with unicode strings. The explicit calls to
            # unicode() below are harmless in python2 but will do the
            # right conversion in python 3.
            fg_color = (curses.tigetstr("setaf") or
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = unicode(fg_color, "ascii")
            self._colors = {
                logging.DEBUG: unicode(curses.tparm(fg_color, 4),  # Blue
                                       "ascii"),
                logging.INFO: unicode(curses.tparm(fg_color, 2),  # Green
                                      "ascii"),
                logging.WARNING: unicode(curses.tparm(fg_color, 3),  # Yellow
                                         "ascii"),
                logging.ERROR: unicode(curses.tparm(fg_color, 1),  # Red
                                       "ascii"),
            }
            self._normal = unicode(curses.tigetstr("sgr0"), "ascii")

    def format(self, record):
        try:
            record.message = record.getMessage()
        except Exception, e:
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
        assert isinstance(record.message, basestring)  # guaranteed by logging
        record.asctime = time.strftime(
            "%y%m%d %H:%M:%S", self.converter(record.created))
        prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
            record.__dict__
        if self._color:
            prefix = (self._colors.get(record.levelno, self._normal) +
                      prefix + self._normal)

        # Encoding notes: The logging module prefers to work with character
        # strings, but only enforces that log messages are instances of
        # basestring. In python 2, non-ascii bytestrings will make
        # their way through the logging framework until they blow up with
        # an unhelpful decoding error (with this formatter it happens
        # when we attach the prefix, but there are other opportunities for
        # exceptions further along in the framework).
        #
        # If a byte string makes it this far, convert it to unicode to
        # ensure it will make it out to the logs. Use repr() as a fallback
        # to ensure that all byte strings can be converted successfully,
        # but don't do it by default so we don't add extra quotes to ascii
        # bytestrings. This is a bit of a hacky place to do this, but
        # it's worth it since the encoding errors that would otherwise
        # result are so useless (and tornado is fond of using utf8-encoded
        # byte strings wherever possible).
        try:
            message = _unicode(record.message)
        except UnicodeDecodeError:
            message = repr(record.message)

        formatted = prefix + " " + message
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            formatted = formatted.rstrip() + "\n" + record.exc_text
        return formatted.replace("\n", "\n    ")


def enable_pretty_logging(options=None):
    """Turns on formatted logging output as configured.

    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        from tornado.options import options
    if options.logging == 'none':
        return
    root_logger = logging.getLogger()
    root_logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        channel = logging.handlers.RotatingFileHandler(
            filename=options.log_file_prefix,
            maxBytes=options.log_file_max_size,
            backupCount=options.log_file_num_backups)
        channel.setFormatter(LogFormatter(color=False))
        root_logger.addHandler(channel)

    if (options.log_to_stderr or
            (options.log_to_stderr is None and not root_logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        root_logger.addHandler(channel)


def define_logging_options(options=None):
    if options is None:
        # late import to prevent cycle
        from tornado.options import options
    options.define("logging", default="info",
                   help=("Set the Python log level. If 'none', tornado won't touch the "
                         "logging configuration."),
                   metavar="debug|info|warning|error|none")
    options.define("log_to_stderr", type=bool, default=None,
                   help=("Send log output to stderr (colorized if possible). "
                         "By default use stderr if --log_file_prefix is not set and "
                         "no other logging is configured."))
    options.define("log_file_prefix", type=str, default=None, metavar="PATH",
                   help=("Path prefix for log files. "
                         "Note that if you are running multiple tornado processes, "
                         "log_file_prefix must be different for each of them (e.g. "
                         "include the port number)"))
    options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
                   help="max size of log files before rollover")
    options.define("log_file_num_backups", type=int, default=10,
                   help="number of log files to keep")

    options.add_parse_callback(enable_pretty_logging)
@@ -19,14 +19,15 @@
from __future__ import absolute_import, division, with_statement

import errno
import logging
import os
import socket
import stat

from tornado import process
from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
from tornado.log import app_log
from tornado.platform.auto import set_close_exec

try:
@@ -234,10 +235,10 @@ class TCPServer(object):
            stream = IOStream(connection, io_loop=self.io_loop)
            self.handle_stream(stream, address)
        except Exception:
            logging.error("Error in connection callback", exc_info=True)
            app_log.error("Error in connection callback", exc_info=True)


def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128):
def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None):
    """Creates listening sockets bound to the given port and address.

    Returns a list of socket objects (multiple sockets are returned if
@@ -253,11 +254,15 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128):

    The ``backlog`` argument has the same meaning as for
    ``socket.listen()``.

    ``flags`` is a bitmask of AI_* flags to ``getaddrinfo``, like
    ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    """
    sockets = []
    if address == "":
        address = None
    flags = socket.AI_PASSIVE
    if flags is None:
        flags = socket.AI_PASSIVE
    for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
                                      0, flags)):
        af, socktype, proto, canonname, sockaddr = res
@@ -335,3 +340,13 @@ def add_accept_handler(sock, callback, io_loop=None):
                raise
            callback(connection, address)
    io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ)


class Resolver(object):
    def __init__(self, io_loop=None, executor=None):
        self.io_loop = io_loop or IOLoop.instance()
        self.executor = executor or dummy_executor

    @run_on_executor
    def getaddrinfo(self, *args, **kwargs):
        return socket.getaddrinfo(*args, **kwargs)
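The new optional ``flags`` parameter threaded through `bind_sockets` above can be exercised like this (a sketch; the port and address are placeholders)::

    import socket
    from tornado.netutil import bind_sockets

    # AI_NUMERICHOST skips DNS resolution for the literal address;
    # AI_PASSIVE alone matches the default when flags is omitted.
    sockets = bind_sockets(8888, address="127.0.0.1", family=socket.AF_INET,
                           flags=socket.AI_PASSIVE | socket.AI_NUMERICHOST)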
@@ -16,7 +16,8 @@

"""A command line parsing module that lets modules define their own options.

Each module defines its own options, e.g.::
Each module defines its own options which are added to the global
option namespace, e.g.::

    from tornado.options import define, options

@@ -30,12 +31,15 @@ Each module defines its own options, e.g.::

The main() method of your application does not need to be aware of all of
the options used throughout your program; they are all automatically loaded
when the modules are loaded. Your main() method can parse the command line
or parse a config file with::
when the modules are loaded. However, all modules that define options
must have been imported before the command line is parsed.

Your main() method can parse the command line or parse a config file with
either::

    import tornado.options
    tornado.options.parse_config_file("/etc/server.conf")
    tornado.options.parse_command_line()
    # or
    tornado.options.parse_config_file("/etc/server.conf")

Command line formats are what you would expect ("--myoption=myvalue").
Config files are just Python files. Global names become options, e.g.::
@@ -46,26 +50,24 @@ Config files are just Python files. Global names become options, e.g.::
We support datetimes, timedeltas, ints, and floats (just pass a 'type'
kwarg to define). We also accept multi-value options. See the documentation
for define() below.

`tornado.options.options` is a singleton instance of `OptionParser`, and
the top-level functions in this module (`define`, `parse_command_line`, etc)
simply call methods on it. You may create additional `OptionParser`
instances to define isolated sets of options, such as for subcommands.
"""
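To make the last paragraph of the docstring above concrete, here is a sketch (the option name and arguments are invented for illustration) of an isolated parser for a subcommand::

    from tornado.options import OptionParser

    sub_options = OptionParser()  # separate from the global `options`
    sub_options.define("force", type=bool, default=False,
                       help="overwrite existing output")
    # args[0] is skipped, mirroring sys.argv conventions
    extra = sub_options.parse_command_line(["prog", "--force", "in.txt"])
    print sub_options.force    # True
    print extra                # ["in.txt"]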
from __future__ import absolute_import, division, with_statement

import datetime
import logging
import logging.handlers
import re
import sys
import os
import time
import textwrap

from tornado.escape import _unicode

# For pretty log messages, if available
try:
    import curses
except ImportError:
    curses = None
from tornado.log import define_logging_options
from tornado import stack_context


class Error(Exception):
@@ -73,27 +75,68 @@ class Error(Exception):
    pass


class _Options(dict):
class OptionParser(object):
    """A collection of options, a dictionary with object-like access.

    Normally accessed via static functions in the `tornado.options` module,
    which reference a global instance.
    """
    def __init__(self):
        # we have to use self.__dict__ because we override setattr.
        self.__dict__['_options'] = {}
        self.__dict__['_parse_callbacks'] = []
        self.define("help", type=bool, help="show this help information",
                    callback=self._help_callback)

    def __getattr__(self, name):
        if isinstance(self.get(name), _Option):
            return self[name].value()
        if isinstance(self._options.get(name), _Option):
            return self._options[name].value()
        raise AttributeError("Unrecognized option %r" % name)

    def __setattr__(self, name, value):
        if isinstance(self.get(name), _Option):
            return self[name].set(value)
        if isinstance(self._options.get(name), _Option):
            return self._options[name].set(value)
        raise AttributeError("Unrecognized option %r" % name)

    def define(self, name, default=None, type=None, help=None, metavar=None,
               multiple=False, group=None):
        if name in self:
               multiple=False, group=None, callback=None):
        """Defines a new command line option.

        If type is given (one of str, float, int, datetime, or timedelta)
        or can be inferred from the default, we parse the command line
        arguments based on the given type. If multiple is True, we accept
        comma-separated values, and the option value is always a list.

        For multi-value integers, we also accept the syntax x:y, which
        turns into range(x, y) - very useful for long integer ranges.

        help and metavar are used to construct the automatically generated
        command line help string. The help message is formatted like::

            --name=METAVAR      help string

        group is used to group the defined options in logical
        groups. By default, command line options are grouped by the
        file in which they are defined.

        Command line option names must be unique globally. They can be parsed
        from the command line with parse_command_line() or parsed from a
        config file with parse_config_file.

        If a callback is given, it will be run with the new value whenever
        the option is changed. This can be used to combine command-line
        and file-based options::

            define("config", type=str, help="path to config file",
                   callback=lambda path: parse_config_file(path, final=False))

        With this definition, options in the file specified by ``--config`` will
        override options set earlier on the command line, but can be overridden
        by later flags.
        """
        if name in self._options:
            raise Error("Option %r already defined in %s", name,
                        self[name].file_name)
                        self._options[name].file_name)
        frame = sys._getframe(0)
        options_file = frame.f_code.co_filename
        file_name = frame.f_back.f_code.co_filename
@@ -108,11 +151,23 @@ class _Options(dict):
            group_name = group
        else:
            group_name = file_name
        self[name] = _Option(name, file_name=file_name, default=default,
                             type=type, help=help, metavar=metavar,
                             multiple=multiple, group_name=group_name)
        self._options[name] = _Option(name, file_name=file_name,
                                      default=default, type=type, help=help,
                                      metavar=metavar, multiple=multiple,
                                      group_name=group_name,
                                      callback=callback)

    def parse_command_line(self, args=None):
    def parse_command_line(self, args=None, final=True):
        """Parses all options given on the command line (defaults to sys.argv).

        Note that args[0] is ignored since it is the program name in sys.argv.

        We return a list of all arguments that are not parsed as options.

        If ``final`` is ``False``, parse callbacks will not be run.
        This is useful for applications that wish to combine configurations
        from multiple sources.
        """
        if args is None:
            args = sys.argv
        remaining = []
@@ -127,40 +182,46 @@ class _Options(dict):
            arg = args[i].lstrip("-")
            name, equals, value = arg.partition("=")
            name = name.replace('-', '_')
            if not name in self:
                print_help()
            if not name in self._options:
                self.print_help()
                raise Error('Unrecognized command line option: %r' % name)
            option = self[name]
            option = self._options[name]
            if not equals:
                if option.type == bool:
                    value = "true"
                else:
                    raise Error('Option %r requires a value' % name)
            option.parse(value)
        if self.help:
            print_help()
            sys.exit(0)

        # Set up log level and pretty console logging by default
        if self.logging != 'none':
            logging.getLogger().setLevel(getattr(logging, self.logging.upper()))
            enable_pretty_logging()
        if final:
            self.run_parse_callbacks()

        return remaining

    def parse_config_file(self, path):
    def parse_config_file(self, path, final=True):
        """Parses and loads the Python config file at the given path.

        If ``final`` is ``False``, parse callbacks will not be run.
        This is useful for applications that wish to combine configurations
        from multiple sources.
        """
        config = {}
        execfile(path, config, config)
        for name in config:
            if name in self:
                self[name].set(config[name])
            if name in self._options:
                self._options[name].set(config[name])

    def print_help(self, file=sys.stdout):
        """Prints all the command line options to stdout."""
        if final:
            self.run_parse_callbacks()

    def print_help(self, file=None):
        """Prints all the command line options to stderr (or another file)."""
        if file is None:
            file = sys.stderr
        print >> file, "Usage: %s [OPTIONS]" % sys.argv[0]
        print >> file, "\nOptions:\n"
        by_group = {}
        for option in self.itervalues():
        for option in self._options.itervalues():
            by_group.setdefault(option.group_name, []).append(option)

        for filename, o in sorted(by_group.items()):
@@ -182,10 +243,67 @@ class _Options(dict):
                print >> file, "%-34s %s" % (' ', line)
        print >> file

    def _help_callback(self, value):
        if value:
            self.print_help()
            sys.exit(0)

    def add_parse_callback(self, callback):
        """Adds a parse callback, to be invoked when option parsing is done."""
        self._parse_callbacks.append(stack_context.wrap(callback))

    def run_parse_callbacks(self):
        for callback in self._parse_callbacks:
            callback()

    def mockable(self):
        """Returns a wrapper around self that is compatible with `mock.patch`.

        The `mock.patch` function (included in the standard library
        `unittest.mock` package since Python 3.3, or in the
        third-party `mock` package for older versions of Python) is
        incompatible with objects like ``options`` that override
        ``__getattr__`` and ``__setattr__``. This function returns an
        object that can be used with `mock.patch.object` to modify
        option values::

            with mock.patch.object(options.mockable(), 'name', value):
                assert options.name == value
        """
        return _Mockable(self)


class _Mockable(object):
    """`mock.patch` compatible wrapper for `OptionParser`.

    As of ``mock`` version 1.0.1, when an object uses ``__getattr__``
    hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete
    the attribute it set instead of setting a new one (assuming that
    the object does not capture ``__setattr__``, so the patch
    created a new attribute in ``__dict__``).

    _Mockable's getattr and setattr pass through to the underlying
    OptionParser, and delattr undoes the effect of a previous setattr.
    """
    def __init__(self, options):
        # Modify __dict__ directly to bypass __setattr__
        self.__dict__['_options'] = options
        self.__dict__['_originals'] = {}

    def __getattr__(self, name):
        return getattr(self._options, name)

    def __setattr__(self, name, value):
        assert name not in self._originals, "don't reuse mockable objects"
        self._originals[name] = getattr(self._options, name)
        setattr(self._options, name, value)

    def __delattr__(self, name):
        setattr(self._options, name, self._originals.pop(name))


class _Option(object):
    def __init__(self, name, default=None, type=basestring, help=None, metavar=None,
                 multiple=False, file_name=None, group_name=None):
    def __init__(self, name, default=None, type=basestring, help=None,
                 metavar=None, multiple=False, file_name=None, group_name=None,
                 callback=None):
        if default is None and multiple:
            default = []
        self.name = name
@@ -195,6 +313,7 @@ class _Option(object):
        self.multiple = multiple
        self.file_name = file_name
        self.group_name = group_name
        self.callback = callback
        self.default = default
        self._value = None

@@ -221,6 +340,8 @@ class _Option(object):
                self._value.append(_parse(part))
        else:
            self._value = _parse(value)
        if self.callback is not None:
            self.callback(self._value)
        return self.value()

    def set(self, value):
@@ -237,6 +358,8 @@ class _Option(object):
                raise Error("Option %r is required to be a %s (%s given)" %
                            (self.name, self.type.__name__, type(value)))
        self._value = value
        if self.callback is not None:
            self.callback(self._value)

# Supported date/time formats in our options
_DATETIME_FORMATS = [
@@ -303,179 +426,54 @@ class _Option(object):
        return _unicode(value)


options = _Options()
"""Global options dictionary.
options = OptionParser()
"""Global options object.

Supports both attribute-style and dict-style access.
All defined options are available as attributes on this object.
"""


def define(name, default=None, type=None, help=None, metavar=None,
           multiple=False, group=None):
    """Defines a new command line option.
           multiple=False, group=None, callback=None):
    """Defines an option in the global namespace.

    If type is given (one of str, float, int, datetime, or timedelta)
    or can be inferred from the default, we parse the command line
    arguments based on the given type. If multiple is True, we accept
    comma-separated values, and the option value is always a list.

    For multi-value integers, we also accept the syntax x:y, which
    turns into range(x, y) - very useful for long integer ranges.

    help and metavar are used to construct the automatically generated
    command line help string. The help message is formatted like::

        --name=METAVAR      help string

    group is used to group the defined options in logical groups. By default,
    command line options are grouped by the defined file.

    Command line option names must be unique globally. They can be parsed
    from the command line with parse_command_line() or parsed from a
    config file with parse_config_file.
    See `OptionParser.define`.
    """
    return options.define(name, default=default, type=type, help=help,
                          metavar=metavar, multiple=multiple, group=group)
                          metavar=metavar, multiple=multiple, group=group,
                          callback=callback)


def parse_command_line(args=None):
    """Parses all options given on the command line (defaults to sys.argv).
def parse_command_line(args=None, final=True):
    """Parses global options from the command line.

    Note that args[0] is ignored since it is the program name in sys.argv.

    We return a list of all arguments that are not parsed as options.
    See `OptionParser.parse_command_line`.
    """
    return options.parse_command_line(args)
    return options.parse_command_line(args, final=final)


def parse_config_file(path):
    """Parses and loads the Python config file at the given path."""
    return options.parse_config_file(path)
def parse_config_file(path, final=True):
    """Parses global options from a config file.

    See `OptionParser.parse_config_file`.
    """
    return options.parse_config_file(path, final=final)


def print_help(file=sys.stdout):
    """Prints all the command line options to stdout."""
def print_help(file=None):
    """Prints all the command line options to stderr (or another file).

    See `OptionParser.print_help`.
    """
    return options.print_help(file)

def add_parse_callback(callback):
    """Adds a parse callback, to be invoked when option parsing is done.

def enable_pretty_logging(options=options):
    """Turns on formatted logging output as configured.

    This is called automatically by `parse_command_line`.
    See `OptionParser.add_parse_callback`
    """
    root_logger = logging.getLogger()
    if options.log_file_prefix:
        channel = logging.handlers.RotatingFileHandler(
            filename=options.log_file_prefix,
            maxBytes=options.log_file_max_size,
            backupCount=options.log_file_num_backups)
        channel.setFormatter(_LogFormatter(color=False))
        root_logger.addHandler(channel)

    if (options.log_to_stderr or
            (options.log_to_stderr is None and not root_logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        color = False
        if curses and sys.stderr.isatty():
            try:
                curses.setupterm()
                if curses.tigetnum("colors") > 0:
                    color = True
            except Exception:
                pass
        channel = logging.StreamHandler()
        channel.setFormatter(_LogFormatter(color=color))
        root_logger.addHandler(channel)


class _LogFormatter(logging.Formatter):
    def __init__(self, color, *args, **kwargs):
        logging.Formatter.__init__(self, *args, **kwargs)
        self._color = color
        if color:
            # The curses module has some str/bytes confusion in
            # python3. Until version 3.2.3, most methods return
            # bytes, but only accept strings. In addition, we want to
            # output these strings with the logging module, which
            # works with unicode strings. The explicit calls to
            # unicode() below are harmless in python2 but will do the
            # right conversion in python 3.
            fg_color = (curses.tigetstr("setaf") or
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = unicode(fg_color, "ascii")
            self._colors = {
                logging.DEBUG: unicode(curses.tparm(fg_color, 4),  # Blue
                                       "ascii"),
                logging.INFO: unicode(curses.tparm(fg_color, 2),  # Green
                                      "ascii"),
                logging.WARNING: unicode(curses.tparm(fg_color, 3),  # Yellow
                                         "ascii"),
                logging.ERROR: unicode(curses.tparm(fg_color, 1),  # Red
                                       "ascii"),
            }
            self._normal = unicode(curses.tigetstr("sgr0"), "ascii")

    def format(self, record):
        try:
            record.message = record.getMessage()
        except Exception, e:
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
        assert isinstance(record.message, basestring)  # guaranteed by logging
        record.asctime = time.strftime(
            "%y%m%d %H:%M:%S", self.converter(record.created))
        prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
            record.__dict__
        if self._color:
            prefix = (self._colors.get(record.levelno, self._normal) +
                      prefix + self._normal)

        # Encoding notes: The logging module prefers to work with character
        # strings, but only enforces that log messages are instances of
        # basestring. In python 2, non-ascii bytestrings will make
        # their way through the logging framework until they blow up with
        # an unhelpful decoding error (with this formatter it happens
        # when we attach the prefix, but there are other opportunities for
        # exceptions further along in the framework).
        #
        # If a byte string makes it this far, convert it to unicode to
        # ensure it will make it out to the logs. Use repr() as a fallback
        # to ensure that all byte strings can be converted successfully,
        # but don't do it by default so we don't add extra quotes to ascii
        # bytestrings. This is a bit of a hacky place to do this, but
        # it's worth it since the encoding errors that would otherwise
        # result are so useless (and tornado is fond of using utf8-encoded
        # byte strings wherever possible).
        try:
            message = _unicode(record.message)
        except UnicodeDecodeError:
            message = repr(record.message)

        formatted = prefix + " " + message
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            formatted = formatted.rstrip() + "\n" + record.exc_text
        return formatted.replace("\n", "\n    ")
    options.add_parse_callback(callback)


# Default options
define("help", type=bool, help="show this help information")
define("logging", default="info",
       help=("Set the Python log level. If 'none', tornado won't touch the "
             "logging configuration."),
       metavar="debug|info|warning|error|none")
define("log_to_stderr", type=bool, default=None,
       help=("Send log output to stderr (colorized if possible). "
             "By default use stderr if --log_file_prefix is not set and "
             "no other logging is configured."))
define("log_file_prefix", type=str, default=None, metavar="PATH",
       help=("Path prefix for log files. "
             "Note that if you are running multiple tornado processes, "
             "log_file_prefix must be different for each of them (e.g. "
             "include the port number)"))
define("log_file_max_size", type=int, default=100 * 1000 * 1000,
       help="max size of log files before rollover")
define("log_file_num_backups", type=int, default=10,
       help="number of log files to keep")
define_logging_options(options)
@@ -32,3 +32,14 @@ if os.name == 'nt':
    from tornado.platform.windows import set_close_exec
else:
    from tornado.platform.posix import set_close_exec, Waker

try:
    # monotime monkey-patches the time module to have a monotonic function
    # in versions of python before 3.3.
    import monotime
except ImportError:
    pass
try:
    from time import monotonic as monotonic_time
except ImportError:
    monotonic_time = None
@@ -69,6 +69,9 @@ class Waker(interface.Waker):
    def fileno(self):
        return self.reader.fileno()

    def write_fileno(self):
        return self.writer.fileno()

    def wake(self):
        try:
            self.writer.send(b("x"))
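Both Waker implementations gain a ``write_fileno`` accessor in this change. A sketch of how a caller might use the pair of descriptors (not code from the patch)::

    from tornado.platform.posix import Waker

    waker = Waker()
    read_fd = waker.fileno()         # register this end with select()/epoll()
    write_fd = waker.write_fileno()  # the end that wake() writes to
    waker.wake()                     # makes read_fd readable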
68  libs/tornado/platform/epoll.py  Executable file
@@ -0,0 +1,68 @@
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""EPoll-based IOLoop implementation for Linux systems.

Supports the standard library's `select.epoll` function for Python 2.6+,
and our own C module for Python 2.5.
"""
from __future__ import absolute_import, division, with_statement

import os
import select

from tornado.ioloop import PollIOLoop

if hasattr(select, 'epoll'):
    # Python 2.6+
    class EPollIOLoop(PollIOLoop):
        def initialize(self, **kwargs):
            super(EPollIOLoop, self).initialize(impl=select.epoll(), **kwargs)
else:
    # Python 2.5
    from tornado import epoll

    class _EPoll(object):
        """An epoll-based event loop using our C module for Python 2.5 systems"""
        _EPOLL_CTL_ADD = 1
        _EPOLL_CTL_DEL = 2
        _EPOLL_CTL_MOD = 3

        def __init__(self):
            self._epoll_fd = epoll.epoll_create()

        def fileno(self):
            return self._epoll_fd

        def close(self):
            os.close(self._epoll_fd)

        def register(self, fd, events):
            epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_ADD, fd, events)

        def modify(self, fd, events):
            epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_MOD, fd, events)

        def unregister(self, fd):
            epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_DEL, fd, 0)

        def poll(self, timeout):
            return epoll.epoll_wait(self._epoll_fd, int(timeout * 1000))

    class EPollIOLoop(PollIOLoop):
        def initialize(self, **kwargs):
            super(EPollIOLoop, self).initialize(impl=_EPoll(), **kwargs)
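All three new platform modules (epoll here, kqueue and select below) plug the same poller interface into ``PollIOLoop``. A sketch of constructing one directly, assuming the configurable-IOLoop machinery this branch introduces calls ``initialize`` on instantiation::

    from tornado.platform.epoll import EPollIOLoop

    io_loop = EPollIOLoop()          # wraps select.epoll() on Python 2.6+
    io_loop.add_callback(io_loop.stop)
    io_loop.start()                  # runs the queued callback, then exits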
@@ -39,13 +39,17 @@ class Waker(object):
    the ``IOLoop`` is closed, it closes its waker too.
    """
    def fileno(self):
        """Returns a file descriptor for this waker.
        """Returns the read file descriptor for this waker.

        Must be suitable for use with ``select()`` or equivalent on the
        local platform.
        """
        raise NotImplementedError()

    def write_fileno(self):
        """Returns the write file descriptor for this waker."""
        raise NotImplementedError()

    def wake(self):
        """Triggers activity on the waker's file descriptor."""
        raise NotImplementedError()
91
libs/tornado/platform/kqueue.py
Executable file
91
libs/tornado/platform/kqueue.py
Executable file
@@ -0,0 +1,91 @@
|
||||
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""KQueue-based IOLoop implementation for BSD/Mac systems."""
from __future__ import absolute_import, division, with_statement

import select

from tornado.ioloop import IOLoop, PollIOLoop

assert hasattr(select, 'kqueue'), 'kqueue not supported'


class _KQueue(object):
    """A kqueue-based event loop for BSD/Mac systems."""
    def __init__(self):
        self._kqueue = select.kqueue()
        self._active = {}

    def fileno(self):
        return self._kqueue.fileno()

    def close(self):
        self._kqueue.close()

    def register(self, fd, events):
        if fd in self._active:
            raise IOError("fd %d already registered" % fd)
        self._control(fd, events, select.KQ_EV_ADD)
        self._active[fd] = events

    def modify(self, fd, events):
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        events = self._active.pop(fd)
        self._control(fd, events, select.KQ_EV_DELETE)

    def _control(self, fd, events, flags):
        kevents = []
        if events & IOLoop.WRITE:
            kevents.append(select.kevent(
                fd, filter=select.KQ_FILTER_WRITE, flags=flags))
        if events & IOLoop.READ or not kevents:
            # Always read when there is not a write
            kevents.append(select.kevent(
                fd, filter=select.KQ_FILTER_READ, flags=flags))
        # Even though control() takes a list, it seems to return EINVAL
        # on Mac OS X (10.6) when there is more than one event in the list.
        for kevent in kevents:
            self._kqueue.control([kevent], 0)

    def poll(self, timeout):
        kevents = self._kqueue.control(None, 1000, timeout)
        events = {}
        for kevent in kevents:
            fd = kevent.ident
            if kevent.filter == select.KQ_FILTER_READ:
                events[fd] = events.get(fd, 0) | IOLoop.READ
            if kevent.filter == select.KQ_FILTER_WRITE:
                if kevent.flags & select.KQ_EV_EOF:
                    # If an asynchronous connection is refused, kqueue
                    # returns a write event with the EOF flag set.
                    # Turn this into an error for consistency with the
                    # other IOLoop implementations.
                    # Note that for read events, EOF may be returned before
                    # all data has been consumed from the socket buffer,
                    # so we only check for EOF on write events.
                    events[fd] = IOLoop.ERROR
                else:
                    events[fd] = events.get(fd, 0) | IOLoop.WRITE
            if kevent.flags & select.KQ_EV_ERROR:
                events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()


class KQueueIOLoop(PollIOLoop):
    def initialize(self, **kwargs):
        super(KQueueIOLoop, self).initialize(impl=_KQueue(), **kwargs)
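For illustration, a minimal sketch of how this poll-style implementation is driven. ``_KQueue`` is a private class and BSD/Mac-only, and the pipe file descriptors here are hypothetical::

    import os
    from tornado.ioloop import IOLoop
    from tornado.platform.kqueue import _KQueue

    kq = _KQueue()
    r, w = os.pipe()
    kq.register(r, IOLoop.READ)    # adds a kevent for the read end
    os.write(w, 'x')
    # poll() returns a list of (fd, event mask) pairs, like epoll's poll()
    for fd, events in kq.poll(1.0):
        assert fd == r and events & IOLoop.READ
    kq.unregister(r)
    kq.close()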
@@ -48,6 +48,9 @@ class Waker(interface.Waker):

    def fileno(self):
        return self.reader.fileno()

    def write_fileno(self):
        return self.writer.fileno()

    def wake(self):
        try:
            self.writer.write(b("x"))
libs/tornado/platform/select.py (new executable file, 75 lines)
@@ -0,0 +1,75 @@
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Select-based IOLoop implementation.

Used as a fallback for systems that don't support epoll or kqueue.
"""
from __future__ import absolute_import, division, with_statement

import select

from tornado.ioloop import IOLoop, PollIOLoop


class _Select(object):
    """A simple, select()-based IOLoop implementation for non-Linux systems"""
    def __init__(self):
        self.read_fds = set()
        self.write_fds = set()
        self.error_fds = set()
        self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)

    def close(self):
        pass

    def register(self, fd, events):
        if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds:
            raise IOError("fd %d already registered" % fd)
        if events & IOLoop.READ:
            self.read_fds.add(fd)
        if events & IOLoop.WRITE:
            self.write_fds.add(fd)
        if events & IOLoop.ERROR:
            self.error_fds.add(fd)
            # Closed connections are reported as errors by epoll and kqueue,
            # but as zero-byte reads by select, so when errors are requested
            # we need to listen for both read and error.
            self.read_fds.add(fd)

    def modify(self, fd, events):
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        self.read_fds.discard(fd)
        self.write_fds.discard(fd)
        self.error_fds.discard(fd)

    def poll(self, timeout):
        readable, writeable, errors = select.select(
            self.read_fds, self.write_fds, self.error_fds, timeout)
        events = {}
        for fd in readable:
            events[fd] = events.get(fd, 0) | IOLoop.READ
        for fd in writeable:
            events[fd] = events.get(fd, 0) | IOLoop.WRITE
        for fd in errors:
            events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()


class SelectIOLoop(PollIOLoop):
    def initialize(self, **kwargs):
        super(SelectIOLoop, self).initialize(impl=_Select(), **kwargs)

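Because this commit also turns the IOLoop hierarchy into a `Configurable` (see the `tornado.util` hunk further down), the portable fallback can in principle be forced explicitly. A hedged sketch, assuming the configurable `IOLoop` machinery of later Tornado releases::

    from tornado.ioloop import IOLoop
    from tornado.platform.select import SelectIOLoop

    # Skip epoll/kqueue auto-detection and use plain select().
    IOLoop.configure(SelectIOLoop)
    loop = IOLoop.instance()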
@@ -16,19 +16,25 @@
# Note: This module's docs are not currently extracted automatically,
# so changes must be made manually to twisted.rst
# TODO: refactor doc build process to use an appropriate virtualenv
"""A Twisted reactor built on the Tornado IOLoop.
"""Bridges between the Twisted reactor and Tornado IOLoop.

This module lets you run applications and libraries written for
Twisted in a Tornado application. To use it, simply call `install` at
the beginning of the application::
Twisted in a Tornado application. It can be used in two modes,
depending on which library's underlying event loop you want to use.

Twisted on Tornado
------------------

`TornadoReactor` implements the Twisted reactor interface on top of
the Tornado IOLoop. To use it, simply call `install` at the beginning
of the application::

    import tornado.platform.twisted
    tornado.platform.twisted.install()
    from twisted.internet import reactor

When the app is ready to start, call `IOLoop.instance().start()`
instead of `reactor.run()`. This will allow you to use a mixture of
Twisted and Tornado code in the same process.
instead of `reactor.run()`.

It is also possible to create a non-global reactor by calling
`tornado.platform.twisted.TornadoReactor(io_loop)`. However, if
@@ -41,18 +47,32 @@ recommended to call::

before closing the `IOLoop`.

This module has been tested with Twisted versions 11.0.0, 11.1.0, and 12.0.0
Tornado on Twisted
------------------

`TwistedIOLoop` implements the Tornado IOLoop interface on top of the Twisted
reactor. Recommended usage::

    from tornado.platform.twisted import TwistedIOLoop
    from twisted.internet import reactor
    TwistedIOLoop().install()
    # Set up your tornado application as usual using `IOLoop.instance`
    reactor.run()

`TwistedIOLoop` always uses the global Twisted reactor.

This module has been tested with Twisted versions 11.0.0 and newer.
"""

from __future__ import absolute_import, division, with_statement

import functools
import logging
import datetime
import time

from twisted.internet.posixbase import PosixReactorBase
from twisted.internet.interfaces import \
    IReactorFDSet, IDelayedCall, IReactorTime
    IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor
from twisted.python import failure, log
from twisted.internet import error

@@ -60,7 +80,8 @@ from zope.interface import implementer

import tornado
import tornado.ioloop
from tornado.stack_context import NullContext
from tornado.log import app_log
from tornado.stack_context import NullContext, wrap
from tornado.ioloop import IOLoop


@@ -80,7 +101,7 @@ class TornadoDelayedCall(object):
        try:
            self._func()
        except:
            logging.error("_called caught exception", exc_info=True)
            app_log.error("_called caught exception", exc_info=True)

    def getTime(self):
        return self._time
@@ -127,6 +148,7 @@ class TornadoReactor(PosixReactorBase):
        self._fds = {}  # a map of fd to a (reader, writer) tuple
        self._delayedCalls = {}
        PosixReactorBase.__init__(self)
        self.addSystemEventTrigger('during', 'shutdown', self.crash)

        # IOLoop.start() bypasses some of the reactor initialization.
        # Fire off the necessary events if they weren't already triggered
|
||||
|
||||
# IReactorTime
|
||||
def seconds(self):
|
||||
return time.time()
|
||||
return self._io_loop.time()
|
||||
|
||||
def callLater(self, seconds, f, *args, **kw):
|
||||
dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
|
||||
@@ -169,6 +191,8 @@ class TornadoReactor(PosixReactorBase):

    # IReactorFDSet
    def _invoke_callback(self, fd, events):
        if fd not in self._fds:
            return
        (reader, writer) = self._fds[fd]
        if reader:
            err = None
@@ -280,7 +304,8 @@ class TornadoReactor(PosixReactorBase):
    # IOLoop.start() instead of Reactor.run().
    def stop(self):
        PosixReactorBase.stop(self)
        self._io_loop.stop()
        fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
        self._io_loop.add_callback(fire_shutdown)

    def crash(self):
        PosixReactorBase.crash(self)
@@ -291,8 +316,6 @@ class TornadoReactor(PosixReactorBase):

    def mainLoop(self):
        self._io_loop.start()
        if self._stopped:
            self.fireSystemEvent("shutdown")
TornadoReactor = implementer(IReactorTime, IReactorFDSet)(TornadoReactor)


@@ -328,3 +351,113 @@ def install(io_loop=None):
    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor


class _FD(object):
    def __init__(self, fd, handler):
        self.fd = fd
        self.handler = handler
        self.reading = False
        self.writing = False
        self.lost = False

    def fileno(self):
        return self.fd

    def doRead(self):
        if not self.lost:
            self.handler(self.fd, tornado.ioloop.IOLoop.READ)

    def doWrite(self):
        if not self.lost:
            self.handler(self.fd, tornado.ioloop.IOLoop.WRITE)

    def connectionLost(self, reason):
        if not self.lost:
            self.handler(self.fd, tornado.ioloop.IOLoop.ERROR)
            self.lost = True

    def logPrefix(self):
        return ''
_FD = implementer(IReadDescriptor, IWriteDescriptor)(_FD)


class TwistedIOLoop(tornado.ioloop.IOLoop):
    """IOLoop implementation that runs on Twisted.

    Uses the global Twisted reactor. It is possible to create multiple
    TwistedIOLoops in the same process, but it doesn't really make sense
    because they will all run in the same thread.

    Not compatible with `tornado.process.Subprocess.set_exit_callback`
    because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
    with each other.
    """
    def initialize(self):
        from twisted.internet import reactor
        self.reactor = reactor
        self.fds = {}

    def close(self, all_fds=False):
        self.reactor.removeAll()
        for c in self.reactor.getDelayedCalls():
            c.cancel()

    def add_handler(self, fd, handler, events):
        if fd in self.fds:
            raise ValueError('fd %d added twice' % fd)
        self.fds[fd] = _FD(fd, wrap(handler))
        if events | tornado.ioloop.IOLoop.READ:
            self.fds[fd].reading = True
            self.reactor.addReader(self.fds[fd])
        if events | tornado.ioloop.IOLoop.WRITE:
            self.fds[fd].writing = True
            self.reactor.addWriter(self.fds[fd])

    def update_handler(self, fd, events):
        if events | tornado.ioloop.IOLoop.READ:
            if not self.fds[fd].reading:
                self.fds[fd].reading = True
                self.reactor.addReader(self.fds[fd])
        else:
            if self.fds[fd].reading:
                self.fds[fd].reading = False
                self.reactor.removeReader(self.fds[fd])
        if events | tornado.ioloop.IOLoop.WRITE:
            if not self.fds[fd].writing:
                self.fds[fd].writing = True
                self.reactor.addWriter(self.fds[fd])
        else:
            if self.fds[fd].writing:
                self.fds[fd].writing = False
                self.reactor.removeWriter(self.fds[fd])

    def remove_handler(self, fd):
        self.fds[fd].lost = True
        if self.fds[fd].reading:
            self.reactor.removeReader(self.fds[fd])
        if self.fds[fd].writing:
            self.reactor.removeWriter(self.fds[fd])
        del self.fds[fd]

    def start(self):
        self.reactor.run()

    def stop(self):
        self.reactor.crash()

    def add_timeout(self, deadline, callback):
        if isinstance(deadline, (int, long, float)):
            delay = max(deadline - self.time(), 0)
        elif isinstance(deadline, datetime.timedelta):
            delay = deadline.total_seconds()
        else:
            raise TypeError("Unsupported deadline %r")
        return self.reactor.callLater(delay, wrap(callback))

    def remove_timeout(self, timeout):
        timeout.cancel()

    def add_callback(self, callback):
        self.reactor.callFromThread(wrap(callback))

    def add_callback_from_signal(self, callback):
        self.add_callback(callback)

@@ -19,14 +19,19 @@
from __future__ import absolute_import, division, with_statement

import errno
import logging
import functools
import os
import signal
import subprocess
import sys
import time

from binascii import hexlify

from tornado import ioloop
from tornado.iostream import PipeIOStream
from tornado.log import gen_log
from tornado import stack_context

try:
    import multiprocessing  # Python 2.6+
@@ -45,7 +50,7 @@ def cpu_count():
        return os.sysconf("SC_NPROCESSORS_CONF")
    except ValueError:
        pass
    logging.error("Could not detect number of processors; assuming 1")
    gen_log.error("Could not detect number of processors; assuming 1")
    return 1


@@ -98,7 +103,7 @@ def fork_processes(num_processes, max_restarts=100):
        raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
                           "has already been initialized. You cannot call "
                           "IOLoop.instance() before calling start_processes()")
    logging.info("Starting %d processes", num_processes)
    gen_log.info("Starting %d processes", num_processes)
    children = {}

    def start_child(i):
@@ -128,13 +133,13 @@ def fork_processes(num_processes, max_restarts=100):
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            logging.warning("child %d (pid %d) killed by signal %d, restarting",
            gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
                            id, pid, os.WTERMSIG(status))
        elif os.WEXITSTATUS(status) != 0:
            logging.warning("child %d (pid %d) exited with status %d, restarting",
            gen_log.warning("child %d (pid %d) exited with status %d, restarting",
                            id, pid, os.WEXITSTATUS(status))
        else:
            logging.info("child %d (pid %d) exited normally", id, pid)
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
@@ -156,3 +161,122 @@ def task_id():
    """
    global _task_id
    return _task_id


class Subprocess(object):
    """Wraps ``subprocess.Popen`` with IOStream support.

    The constructor is the same as ``subprocess.Popen`` with the following
    additions:

    * ``stdin``, ``stdout``, and ``stderr`` may have the value
      `tornado.process.Subprocess.STREAM`, which will make the corresponding
      attribute of the resulting Subprocess a `PipeIOStream`.
    * A new keyword argument ``io_loop`` may be used to pass in an IOLoop.
    """
    STREAM = object()

    _initialized = False
    _waiting = {}

    def __init__(self, *args, **kwargs):
        self.io_loop = kwargs.pop('io_loop', None)
        to_close = []
        if kwargs.get('stdin') is Subprocess.STREAM:
            in_r, in_w = os.pipe()
            kwargs['stdin'] = in_r
            to_close.append(in_r)
            self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
        if kwargs.get('stdout') is Subprocess.STREAM:
            out_r, out_w = os.pipe()
            kwargs['stdout'] = out_w
            to_close.append(out_w)
            self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
        if kwargs.get('stderr') is Subprocess.STREAM:
            err_r, err_w = os.pipe()
            kwargs['stderr'] = err_w
            to_close.append(err_w)
            self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
        self.proc = subprocess.Popen(*args, **kwargs)
        for fd in to_close:
            os.close(fd)
        for attr in ['stdin', 'stdout', 'stderr', 'pid']:
            if not hasattr(self, attr):  # don't clobber streams set above
                setattr(self, attr, getattr(self.proc, attr))
        self._exit_callback = None
        self.returncode = None

    def set_exit_callback(self, callback):
        """Runs ``callback`` when this process exits.

        The callback takes one argument, the return code of the process.

        This method uses a ``SIGCHLD`` handler, which is a global setting
        and may conflict if you have other libraries trying to handle the
        same signal.  If you are using more than one ``IOLoop`` it may
        be necessary to call `Subprocess.initialize` first to designate
        one ``IOLoop`` to run the signal handlers.

        In many cases a close callback on the stdout or stderr streams
        can be used as an alternative to an exit callback if the
        signal handler is causing a problem.
        """
        self._exit_callback = stack_context.wrap(callback)
        Subprocess.initialize(self.io_loop)
        Subprocess._waiting[self.pid] = self
        Subprocess._try_cleanup_process(self.pid)

    @classmethod
    def initialize(cls, io_loop=None):
        """Initializes the ``SIGCHLD`` handler.

        The signal handler is run on an IOLoop to avoid locking issues.
        Note that the IOLoop used for signal handling need not be the
        same one used by individual Subprocess objects (as long as the
        IOLoops are each running in separate threads).
        """
        if cls._initialized:
            return
        if io_loop is None:
            io_loop = ioloop.IOLoop.instance()
        cls._old_sigchld = signal.signal(
            signal.SIGCHLD,
            lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
        cls._initialized = True

    @classmethod
    def uninitialize(cls):
        """Removes the ``SIGCHLD`` handler."""
        if not cls._initialized:
            return
        signal.signal(signal.SIGCHLD, cls._old_sigchld)
        cls._initialized = False

    @classmethod
    def _cleanup(cls):
        for pid in cls._waiting.keys():
            cls._try_cleanup_process(pid)

    @classmethod
    def _try_cleanup_process(cls, pid):
        try:
            ret_pid, status = os.waitpid(pid, os.WNOHANG)
        except OSError, e:
            if e.args[0] == errno.ECHILD:
                return
        if ret_pid == 0:
            return
        assert ret_pid == pid
        subproc = cls._waiting.pop(pid)
        subproc.io_loop.add_callback_from_signal(
            functools.partial(subproc._set_returncode, status))

    def _set_returncode(self, status):
        if os.WIFSIGNALED(status):
            self.returncode = -os.WTERMSIG(status)
        else:
            assert os.WIFEXITED(status)
            self.returncode = os.WEXITSTATUS(status)
        if self._exit_callback:
            callback = self._exit_callback
            self._exit_callback = None
            callback(self.returncode)

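A rough usage sketch of the new wrapper (the ``echo`` command and callback are invented for illustration): stream a child's stdout without blocking the IOLoop::

    from tornado import ioloop
    from tornado.process import Subprocess

    proc = Subprocess(['echo', 'hello'], stdout=Subprocess.STREAM)

    def on_stdout(data):
        # Runs on the IOLoop once the child closes its stdout.
        print 'child said: %r' % data
        ioloop.IOLoop.instance().stop()

    proc.stdout.read_until_close(on_stdout)
    ioloop.IOLoop.instance().start()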
@@ -2,9 +2,11 @@
from __future__ import absolute_import, division, with_statement

from tornado.escape import utf8, _unicode, native_str
from tornado.httpclient import HTTPRequest, HTTPResponse, HTTPError, AsyncHTTPClient, main
from tornado.httpclient import HTTPRequest, HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado.httputil import HTTPHeaders
from tornado.iostream import IOStream, SSLIOStream
from tornado.netutil import Resolver
from tornado.log import gen_log
from tornado import stack_context
from tornado.util import b, GzipDecompressor

@@ -13,7 +15,6 @@ import collections
import contextlib
import copy
import functools
import logging
import os.path
import re
import socket
@@ -39,17 +40,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):

    This class implements an HTTP 1.1 client on top of Tornado's IOStreams.
    It does not currently implement all applicable parts of the HTTP
    specification, but it does enough to work with major web service APIs
    (mostly tested against the Twitter API so far).

    This class has not been tested extensively in production and
    should be considered somewhat experimental as of the release of
    tornado 1.2.  It is intended to become the default AsyncHTTPClient
    implementation in a future release.  It may either be used
    directly, or to facilitate testing of this class with an existing
    application, setting the environment variable
    USE_SIMPLE_HTTPCLIENT=1 will cause this class to transparently
    replace tornado.httpclient.AsyncHTTPClient.
    specification, but it does enough to work with major web service APIs.

    Some features found in the curl-based AsyncHTTPClient are not yet
    supported.  In particular, proxies are not supported, connections
@@ -61,19 +52,18 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):

    """
    def initialize(self, io_loop=None, max_clients=10,
                   max_simultaneous_connections=None,
                   hostname_mapping=None, max_buffer_size=104857600):
                   hostname_mapping=None, max_buffer_size=104857600,
                   resolver=None, defaults=None):
        """Creates an AsyncHTTPClient.

        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        force_instance=True may be used to suppress this behavior.

        max_clients is the number of concurrent requests that can be in
        progress.  max_simultaneous_connections has no effect and is accepted
        only for compatibility with the curl-based AsyncHTTPClient.  Note
        that these arguments are only used when the client is first created,
        and will be ignored when an existing client is reused.
        max_clients is the number of concurrent requests that can be
        in progress.  Note that these arguments are only used when the
        client is first created, and will be ignored when an existing
        client is reused.

        hostname_mapping is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
@@ -89,6 +79,10 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
        self.active = {}
        self.hostname_mapping = hostname_mapping
        self.max_buffer_size = max_buffer_size
        self.resolver = resolver or Resolver(io_loop=io_loop)
        self.defaults = dict(HTTPRequest._DEFAULTS)
        if defaults is not None:
            self.defaults.update(defaults)

    def fetch(self, request, callback, **kwargs):
        if not isinstance(request, HTTPRequest):
@@ -97,11 +91,12 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
        # so make sure we don't modify the caller's object.  This is also
        # where normal dicts get converted to HTTPHeaders objects.
        request.headers = HTTPHeaders(request.headers)
        request = _RequestProxy(request, self.defaults)
        callback = stack_context.wrap(callback)
        self.queue.append((request, callback))
        self._process_queue()
        if self.queue:
            logging.debug("max_clients limit reached, request queued. "
            gen_log.debug("max_clients limit reached, request queued. "
                          "%d active, %d queued requests." % (
                              len(self.active), len(self.queue)))

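The new ``defaults`` argument, combined with ``_RequestProxy``, lets any option omitted from an individual ``fetch()`` fall back to a client-wide value. A hedged sketch, assuming only the ``initialize`` signature shown above (the timeout value is illustrative)::

    from tornado.ioloop import IOLoop
    from tornado.simple_httpclient import SimpleAsyncHTTPClient

    # Every request gets a 30s timeout unless fetch() overrides it.
    client = SimpleAsyncHTTPClient(io_loop=IOLoop.instance(),
                                   defaults=dict(request_timeout=30))
    client.fetch('http://example.com/', callback=lambda response: None)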
@@ -126,12 +121,13 @@ class _HTTPConnection(object):

    def __init__(self, io_loop, client, request, release_callback,
                 final_callback, max_buffer_size):
        self.start_time = time.time()
        self.start_time = io_loop.time()
        self.io_loop = io_loop
        self.client = client
        self.request = request
        self.release_callback = release_callback
        self.final_callback = final_callback
        self.max_buffer_size = max_buffer_size
        self.code = None
        self.headers = None
        self.chunks = None
@@ -139,16 +135,16 @@ class _HTTPConnection(object):
        # Timeout handle returned by IOLoop.add_timeout
        self._timeout = None
        with stack_context.StackContext(self.cleanup):
            parsed = urlparse.urlsplit(_unicode(self.request.url))
            if ssl is None and parsed.scheme == "https":
            self.parsed = urlparse.urlsplit(_unicode(self.request.url))
            if ssl is None and self.parsed.scheme == "https":
                raise ValueError("HTTPS requires either python2.6+ or "
                                 "curl_httpclient")
            if parsed.scheme not in ("http", "https"):
            if self.parsed.scheme not in ("http", "https"):
                raise ValueError("Unsupported url scheme: %s" %
                                 self.request.url)
            # urlsplit results have hostname and port results, but they
            # didn't support ipv6 literals until python 2.7.
            netloc = parsed.netloc
            netloc = self.parsed.netloc
            if "@" in netloc:
                userpass, _, netloc = netloc.rpartition("@")
            match = re.match(r'^(.+):(\d+)$', netloc)
@@ -157,11 +153,11 @@ class _HTTPConnection(object):
                port = int(match.group(2))
            else:
                host = netloc
                port = 443 if parsed.scheme == "https" else 80
                port = 443 if self.parsed.scheme == "https" else 80
            if re.match(r'^\[.*\]$', host):
                # raw ipv6 addresses in urls are enclosed in brackets
                host = host[1:-1]
            parsed_hostname = host  # save final parsed host for _on_connect
            self.parsed_hostname = host  # save final host for _on_connect
            if self.client.hostname_mapping is not None:
                host = self.client.hostname_mapping.get(host, host)

@@ -172,66 +168,67 @@ class _HTTPConnection(object):
            # so restrict to ipv4 by default.
            af = socket.AF_INET

            addrinfo = socket.getaddrinfo(host, port, af, socket.SOCK_STREAM,
                                          0, 0)
            af, socktype, proto, canonname, sockaddr = addrinfo[0]

            if parsed.scheme == "https":
                ssl_options = {}
                if request.validate_cert:
                    ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
                if request.ca_certs is not None:
                    ssl_options["ca_certs"] = request.ca_certs
                else:
                    ssl_options["ca_certs"] = _DEFAULT_CA_CERTS
                if request.client_key is not None:
                    ssl_options["keyfile"] = request.client_key
                if request.client_cert is not None:
                    ssl_options["certfile"] = request.client_cert

                # SSL interoperability is tricky.  We want to disable
                # SSLv2 for security reasons; it wasn't disabled by default
                # until openssl 1.0.  The best way to do this is to use
                # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
                # until 3.2.  Python 2.7 adds the ciphers argument, which
                # can also be used to disable SSLv2.  As a last resort
                # on python 2.6, we set ssl_version to SSLv3.  This is
                # more narrow than we'd like since it also breaks
                # compatibility with servers configured for TLSv1 only,
                # but nearly all servers support SSLv3:
                # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
                if sys.version_info >= (2, 7):
                    ssl_options["ciphers"] = "DEFAULT:!SSLv2"
                else:
                    # This is really only necessary for pre-1.0 versions
                    # of openssl, but python 2.6 doesn't expose version
                    # information.
                    ssl_options["ssl_version"] = ssl.PROTOCOL_SSLv3

                self.stream = SSLIOStream(socket.socket(af, socktype, proto),
                                          io_loop=self.io_loop,
                                          ssl_options=ssl_options,
                                          max_buffer_size=max_buffer_size)
            else:
                self.stream = IOStream(socket.socket(af, socktype, proto),
                                       io_loop=self.io_loop,
                                       max_buffer_size=max_buffer_size)
            timeout = min(request.connect_timeout, request.request_timeout)
            if timeout:
                self._timeout = self.io_loop.add_timeout(
                    self.start_time + timeout,
                    stack_context.wrap(self._on_timeout))
            self.stream.set_close_callback(self._on_close)
            self.stream.connect(sockaddr,
                                functools.partial(self._on_connect, parsed,
                                                  parsed_hostname))
            self.client.resolver.getaddrinfo(
                host, port, af, socket.SOCK_STREAM, 0, 0,
                callback=self._on_resolve)

    def _on_resolve(self, future):
        af, socktype, proto, canonname, sockaddr = future.result()[0]

        if self.parsed.scheme == "https":
            ssl_options = {}
            if self.request.validate_cert:
                ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
            if self.request.ca_certs is not None:
                ssl_options["ca_certs"] = self.request.ca_certs
            else:
                ssl_options["ca_certs"] = _DEFAULT_CA_CERTS
            if self.request.client_key is not None:
                ssl_options["keyfile"] = self.request.client_key
            if self.request.client_cert is not None:
                ssl_options["certfile"] = self.request.client_cert

            # SSL interoperability is tricky.  We want to disable
            # SSLv2 for security reasons; it wasn't disabled by default
            # until openssl 1.0.  The best way to do this is to use
            # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
            # until 3.2.  Python 2.7 adds the ciphers argument, which
            # can also be used to disable SSLv2.  As a last resort
            # on python 2.6, we set ssl_version to SSLv3.  This is
            # more narrow than we'd like since it also breaks
            # compatibility with servers configured for TLSv1 only,
            # but nearly all servers support SSLv3:
            # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
            if sys.version_info >= (2, 7):
                ssl_options["ciphers"] = "DEFAULT:!SSLv2"
            else:
                # This is really only necessary for pre-1.0 versions
                # of openssl, but python 2.6 doesn't expose version
                # information.
                ssl_options["ssl_version"] = ssl.PROTOCOL_SSLv3

            self.stream = SSLIOStream(socket.socket(af, socktype, proto),
                                      io_loop=self.io_loop,
                                      ssl_options=ssl_options,
                                      max_buffer_size=self.max_buffer_size)
        else:
            self.stream = IOStream(socket.socket(af, socktype, proto),
                                   io_loop=self.io_loop,
                                   max_buffer_size=self.max_buffer_size)
        timeout = min(self.request.connect_timeout, self.request.request_timeout)
        if timeout:
            self._timeout = self.io_loop.add_timeout(
                self.start_time + timeout,
                stack_context.wrap(self._on_timeout))
        self.stream.set_close_callback(self._on_close)
        self.stream.connect(sockaddr, self._on_connect)

    def _on_timeout(self):
        self._timeout = None
        if self.final_callback is not None:
            raise HTTPError(599, "Timeout")

    def _on_connect(self, parsed, parsed_hostname):
    def _on_connect(self):
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None
@@ -243,10 +240,10 @@ class _HTTPConnection(object):
                isinstance(self.stream, SSLIOStream)):
            match_hostname(self.stream.socket.getpeercert(),
                           # ipv6 addresses are broken (in
                           # parsed.hostname) until 2.7, here is
                           # self.parsed.hostname) until 2.7, here is
                           # correctly parsed value calculated in
                           # __init__
                           parsed_hostname)
                           self.parsed_hostname)
        if (self.request.method not in self._SUPPORTED_METHODS and
                not self.request.allow_nonstandard_methods):
            raise KeyError("unknown method %s" % self.request.method)
@@ -258,13 +255,13 @@ class _HTTPConnection(object):
        if "Connection" not in self.request.headers:
            self.request.headers["Connection"] = "close"
        if "Host" not in self.request.headers:
            if '@' in parsed.netloc:
                self.request.headers["Host"] = parsed.netloc.rpartition('@')[-1]
            if '@' in self.parsed.netloc:
                self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1]
            else:
                self.request.headers["Host"] = parsed.netloc
                self.request.headers["Host"] = self.parsed.netloc
        username, password = None, None
        if parsed.username is not None:
            username, password = parsed.username, parsed.password
        if self.parsed.username is not None:
            username, password = self.parsed.username, self.parsed.password
        elif self.request.auth_username is not None:
            username = self.request.auth_username
            password = self.request.auth_password or ''
@@ -287,8 +284,8 @@ class _HTTPConnection(object):
            self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        if self.request.use_gzip:
            self.request.headers["Accept-Encoding"] = "gzip"
        req_path = ((parsed.path or '/') +
                    (('?' + parsed.query) if parsed.query else ''))
        req_path = ((self.parsed.path or '/') +
                    (('?' + self.parsed.query) if self.parsed.query else ''))
        request_lines = [utf8("%s %s HTTP/1.1" % (self.request.method,
                                                  req_path))]
        for k, v in self.request.headers.get_all():
@@ -319,23 +316,32 @@ class _HTTPConnection(object):
        try:
            yield
        except Exception, e:
            logging.warning("uncaught exception", exc_info=True)
            gen_log.warning("uncaught exception", exc_info=True)
            self._run_callback(HTTPResponse(self.request, 599, error=e,
                                            request_time=time.time() - self.start_time,
                                            request_time=self.io_loop.time() - self.start_time,
                                            ))
            if hasattr(self, "stream"):
                self.stream.close()

    def _on_close(self):
        if self.final_callback is not None:
            raise HTTPError(599, "Connection closed")
            message = "Connection closed"
            if self.stream.error:
                message = str(self.stream.error)
            raise HTTPError(599, message)

    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+)", first_line)
        match = re.match("HTTP/1.[01] ([0-9]+) ([^\r]*)", first_line)
        assert match
        self.code = int(match.group(1))
        code = int(match.group(1))
        if 100 <= code < 200:
            self.stream.read_until_regex(b("\r?\n\r?\n"), self._on_headers)
            return
        else:
            self.code = code
            self.reason = match.group(2)
        self.headers = HTTPHeaders.parse(header_data)

        if "Content-Length" in self.headers:
@@ -353,15 +359,18 @@ class _HTTPConnection(object):
            content_length = None

        if self.request.header_callback is not None:
            # re-attach the newline we split on earlier
            self.request.header_callback(first_line + _)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')

        if self.request.method == "HEAD":
            # HEAD requests never have content, even though they may have
            # content-length headers
        if self.request.method == "HEAD" or self.code == 304:
            # HEAD requests and 304 responses never have content, even
            # though they may have content-length headers
            self._on_body(b(""))
            return
        if 100 <= self.code < 200 or self.code in (204, 304):
        if 100 <= self.code < 200 or self.code == 204:
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in self.headers or
@@ -391,14 +400,20 @@ class _HTTPConnection(object):
        if (self.request.follow_redirects and
                self.request.max_redirects > 0 and
                self.code in (301, 302, 303, 307)):
            new_request = copy.copy(self.request)
            assert isinstance(self.request, _RequestProxy)
            new_request = copy.copy(self.request.request)
            new_request.url = urlparse.urljoin(self.request.url,
                                               self.headers["Location"])
            new_request.max_redirects -= 1
            new_request.max_redirects = self.request.max_redirects - 1
            del new_request.headers["Host"]
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            # client SHOULD make a GET request
            if self.code == 303:
            # Client SHOULD make a GET request after a 303.
            # According to the spec, 302 should be followed by the same
            # method as the original request, but in practice browsers
            # treat 302 the same as 303, and many servers use 302 for
            # compatibility with pre-HTTP/1.1 user agents which don't
            # understand the 303 status.
            if self.code in (302, 303):
                new_request.method = "GET"
                new_request.body = None
                for h in ["Content-Length", "Content-Type",
@@ -426,8 +441,9 @@ class _HTTPConnection(object):
        else:
            buffer = BytesIO(data)  # TODO: don't require one big string?
        response = HTTPResponse(original_request,
                                self.code, headers=self.headers,
                                request_time=time.time() - self.start_time,
                                self.code, reason=self.reason,
                                headers=self.headers,
                                request_time=self.io_loop.time() - self.start_time,
                                buffer=buffer,
                                effective_url=self.request.url)
        self._run_callback(response)

@@ -70,7 +70,6 @@ from __future__ import absolute_import, division, with_statement

import contextlib
import functools
import itertools
import operator
import sys
import threading
@@ -161,6 +160,7 @@ class ExceptionStackContext(object):
            return self.exception_handler(type, value, traceback)
        finally:
            _state.contexts = self.old_contexts
            self.old_contexts = None


class NullContext(object):
@@ -197,32 +197,15 @@ def wrap(fn):

    def wrapped(*args, **kwargs):
        callback, contexts, args = args[0], args[1], args[2:]

        if contexts is _state.contexts or not contexts:
            callback(*args, **kwargs)
            return
        if not _state.contexts:
            new_contexts = [cls(arg, active_cell)
                            for (cls, arg, active_cell) in contexts
                            if active_cell[0]]
        # If we're moving down the stack, _state.contexts is a prefix
        # of contexts.  For each element of contexts not in that prefix,
        # create a new StackContext object.
        # If we're moving up the stack (or to an entirely different stack),
        # _state.contexts will have elements not in contexts.  Use
        # NullContext to clear the state and then recreate from contexts.
        elif (len(_state.contexts) > len(contexts) or
              any(a[1] is not b[1]
                  for a, b in itertools.izip(_state.contexts, contexts))):
            # contexts have been removed or changed, so start over
            new_contexts = ([NullContext()] +
                            [cls(arg, active_cell)
                             for (cls, arg, active_cell) in contexts
                             if active_cell[0]])

        if _state.contexts:
            new_contexts = [NullContext()]
        else:
            new_contexts = [cls(arg, active_cell)
                            for (cls, arg, active_cell) in contexts[len(_state.contexts):]
                            if active_cell[0]]
            new_contexts = []
        if contexts:
            new_contexts.extend(cls(arg, active_cell)
                                for (cls, arg, active_cell) in contexts
                                if active_cell[0])
        if len(new_contexts) > 1:
            with _nested(*new_contexts):
                callback(*args, **kwargs)
@@ -231,10 +214,7 @@ def wrap(fn):
                callback(*args, **kwargs)
        else:
            callback(*args, **kwargs)
    if _state.contexts:
        return _StackContextWrapper(wrapped, fn, _state.contexts)
    else:
        return _StackContextWrapper(fn)
    return _StackContextWrapper(wrapped, fn, _state.contexts)


@contextlib.contextmanager

@@ -184,13 +184,13 @@ from __future__ import absolute_import, division, with_statement

import cStringIO
import datetime
import linecache
import logging
import os.path
import posixpath
import re
import threading

from tornado import escape
from tornado.log import app_log
from tornado.util import bytes_type, ObjectDict

_DEFAULT_AUTOESCAPE = "xhtml_escape"
@@ -203,6 +203,9 @@ class Template(object):
    We compile into Python from the given template_string. You can generate
    the template from variables with generate().
    """
    # note that the constructor's signature is not extracted with
    # autodoc because _UNSET looks like garbage.  When changing
    # this signature update website/sphinx/template.rst too.
    def __init__(self, template_string, name="<string>", loader=None,
                 compress_whitespace=None, autoescape=_UNSET):
        self.name = name
@@ -229,7 +232,7 @@ class Template(object):
                "exec")
        except Exception:
            formatted_code = _format_code(self.code).rstrip()
            logging.error("%s code:\n%s", self.name, formatted_code)
            app_log.error("%s code:\n%s", self.name, formatted_code)
            raise

    def generate(self, **kwargs):
@@ -257,12 +260,7 @@ class Template(object):
        # we've generated a new template (mainly for this module's
        # unittests, where different tests reuse the same name).
        linecache.clearcache()
        try:
            return execute()
        except Exception:
            formatted_code = _format_code(self.code).rstrip()
            logging.error("%s code:\n%s", self.name, formatted_code)
            raise
        return execute()

    def _generate_python(self, loader, compress_whitespace):
        buffer = cStringIO.StringIO()
@@ -484,7 +482,7 @@ class _ApplyBlock(_Node):
        writer.write_line("_append = _buffer.append", self.line)
        self.body.generate(writer)
        writer.write_line("return _utf8('').join(_buffer)", self.line)
        writer.write_line("_append(%s(%s()))" % (
        writer.write_line("_append(_utf8(%s(%s())))" % (
            self.method, method_name), self.line)


@@ -501,6 +499,8 @@ class _ControlBlock(_Node):
        writer.write_line("%s:" % self.statement, self.line)
        with writer.indent():
            self.body.generate(writer)
            # Just in case the body was empty
            writer.write_line("pass", self.line)


class _IntermediateControlBlock(_Node):
@@ -509,6 +509,8 @@ class _IntermediateControlBlock(_Node):
        self.line = line

    def generate(self, writer):
        # In case the previous block was empty
        writer.write_line("pass", self.line)
        writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)


@@ -26,34 +26,65 @@ try:
    from tornado.httpserver import HTTPServer
    from tornado.simple_httpclient import SimpleAsyncHTTPClient
    from tornado.ioloop import IOLoop
    from tornado import netutil
except ImportError:
    # These modules are not importable on app engine.  Parts of this module
    # won't work, but e.g. LogTrapTestCase and main() will.
    AsyncHTTPClient = None
    HTTPServer = None
    IOLoop = None
    netutil = None
    SimpleAsyncHTTPClient = None
from tornado.stack_context import StackContext, NullContext
from tornado.log import gen_log
from tornado.stack_context import StackContext
from tornado.util import raise_exc_info
import contextlib
import logging
import os
import re
import signal
import socket
import sys
import time
import unittest

# Tornado's own test suite requires the updated unittest module
# (either py27+ or unittest2) so tornado.test.util enforces
# this requirement, but for other users of tornado.testing we want
# to allow the older version if unittest2 is not available.
try:
    import unittest2 as unittest
except ImportError:
    import unittest

_next_port = 10000


def get_unused_port():
    """Returns a (hopefully) unused port number."""
    """Returns a (hopefully) unused port number.

    This function does not guarantee that the port it returns is available,
    only that a series of get_unused_port calls in a single process return
    distinct ports.

    **Deprecated**.  Use bind_unused_port instead, which is guaranteed
    to find an unused port.
    """
    global _next_port
    port = _next_port
    _next_port = _next_port + 1
    return port


def bind_unused_port():
    """Binds a server socket to an available port on localhost.

    Returns a tuple (socket, port).
    """
    [sock] = netutil.bind_sockets(0, 'localhost', family=socket.AF_INET)
    port = sock.getsockname()[1]
    return sock, port


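The replacement helper can also be used directly in tests that need their own listening socket; a brief sketch::

    from tornado.testing import bind_unused_port

    sock, port = bind_unused_port()   # the socket is already bound
    print 'test server will listen on port %d' % port
    sock.close()
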
class AsyncTestCase(unittest.TestCase):
    """TestCase subclass for testing IOLoop-based asynchronous code.

@@ -116,8 +147,10 @@ class AsyncTestCase(unittest.TestCase):
    def setUp(self):
        super(AsyncTestCase, self).setUp()
        self.io_loop = self.get_new_ioloop()
        self.io_loop.make_current()

    def tearDown(self):
        self.io_loop.clear_current()
        if (not IOLoop.initialized() or
                self.io_loop is not IOLoop.instance()):
            # Try to clean up any file descriptors left open in the ioloop.
@@ -189,14 +222,10 @@ class AsyncTestCase(unittest.TestCase):
                self.stop()
            if self.__timeout is not None:
                self.io_loop.remove_timeout(self.__timeout)
            self.__timeout = self.io_loop.add_timeout(time.time() + timeout, timeout_func)
            self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func)
        while True:
            self.__running = True
            with NullContext():
                # Wipe out the StackContext that was established in
                # self.run() so that all callbacks executed inside the
                # IOLoop will re-run it.
                self.io_loop.start()
            self.io_loop.start()
            if (self.__failure is not None or
                    condition is None or condition()):
                break
@@ -233,12 +262,13 @@ class AsyncHTTPTestCase(AsyncTestCase):
    '''
    def setUp(self):
        super(AsyncHTTPTestCase, self).setUp()
        self.__port = None
        sock, port = bind_unused_port()
        self.__port = port

        self.http_client = self.get_http_client()
        self._app = self.get_app()
        self.http_server = self.get_http_server()
        self.http_server.listen(self.get_http_port(), address="127.0.0.1")
        self.http_server.add_sockets([sock])

    def get_http_client(self):
        return AsyncHTTPClient(io_loop=self.io_loop)
@@ -247,7 +277,6 @@ class AsyncHTTPTestCase(AsyncTestCase):
        return HTTPServer(self._app, io_loop=self.io_loop,
                          **self.get_httpserver_options())


    def get_app(self):
        """Should be overridden by subclasses to return a
        tornado.web.Application or other HTTPServer callback.
@@ -276,8 +305,6 @@ class AsyncHTTPTestCase(AsyncTestCase):

        A new port is chosen for each test.
        """
        if self.__port is None:
            self.__port = get_unused_port()
        return self.__port

    def get_protocol(self):
@@ -290,7 +317,9 @@ class AsyncHTTPTestCase(AsyncTestCase):

    def tearDown(self):
        self.http_server.stop()
        self.http_client.close()
        if (not IOLoop.initialized() or
                self.http_client.io_loop is not IOLoop.instance()):
            self.http_client.close()
        super(AsyncHTTPTestCase, self).tearDown()


@@ -302,7 +331,8 @@ class AsyncHTTPSTestCase(AsyncHTTPTestCase):
    def get_http_client(self):
        # Some versions of libcurl have deadlock bugs with ssl,
        # so always run these tests with SimpleAsyncHTTPClient.
        return SimpleAsyncHTTPClient(io_loop=self.io_loop, force_instance=True)
        return SimpleAsyncHTTPClient(io_loop=self.io_loop, force_instance=True,
                                     defaults=dict(validate_cert=False))

    def get_httpserver_options(self):
        return dict(ssl_options=self.get_ssl_options())
@@ -322,10 +352,6 @@ class AsyncHTTPSTestCase(AsyncHTTPTestCase):
    def get_protocol(self):
        return 'https'

    def fetch(self, path, **kwargs):
        return AsyncHTTPTestCase.fetch(self, path, validate_cert=False,
                                       **kwargs)


class LogTrapTestCase(unittest.TestCase):
    """A test case that captures and discards all logging output
@@ -357,7 +383,7 @@ class LogTrapTestCase(unittest.TestCase):
        old_stream = handler.stream
        try:
            handler.stream = StringIO()
            logging.info("RUNNING TEST: " + str(self))
            gen_log.info("RUNNING TEST: " + str(self))
            old_error_count = len(result.failures) + len(result.errors)
            super(LogTrapTestCase, self).run(result)
            new_error_count = len(result.failures) + len(result.errors)
@@ -367,6 +393,50 @@ class LogTrapTestCase(unittest.TestCase):
        handler.stream = old_stream


class ExpectLog(logging.Filter):
    """Context manager to capture and suppress expected log output.

    Useful to make tests of error conditions less noisy, while still
    leaving unexpected log entries visible.  *Not thread safe.*

    Usage::

        with ExpectLog('tornado.application', "Uncaught exception"):
            error_response = self.fetch("/some_page")
    """
    def __init__(self, logger, regex, required=True):
        """Constructs an ExpectLog context manager.

        :param logger: Logger object (or name of logger) to watch.  Pass
            an empty string to watch the root logger.
        :param regex: Regular expression to match.  Any log entries on
            the specified logger that match this regex will be suppressed.
        :param required: If true, an exception will be raised if the end of
            the ``with`` statement is reached without matching any log entries.
        """
        if isinstance(logger, basestring):
            logger = logging.getLogger(logger)
        self.logger = logger
        self.regex = re.compile(regex)
        self.required = required
        self.matched = False

    def filter(self, record):
        message = record.getMessage()
        if self.regex.match(message):
            self.matched = True
            return False
        return True

    def __enter__(self):
        self.logger.addFilter(self)

    def __exit__(self, typ, value, tb):
        self.logger.removeFilter(self)
        if not typ and self.required and not self.matched:
            raise Exception("did not get expected log message")


def main(**kwargs):
    """A simple test runner.

@@ -400,23 +470,35 @@ def main(**kwargs):
    """
    from tornado.options import define, options, parse_command_line

    define('autoreload', type=bool, default=False,
           help="DEPRECATED: use tornado.autoreload.main instead")
    define('httpclient', type=str, default=None)
    define('exception_on_interrupt', type=bool, default=True,
           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
                 "exception.  This prints a stack trace but cannot interrupt "
                 "certain operations.  If false, the process is more reliably "
                 "killed, but does not print a stack trace."))
    argv = [sys.argv[0]] + parse_command_line(sys.argv)

    if options.httpclient:
        from tornado.httpclient import AsyncHTTPClient
        AsyncHTTPClient.configure(options.httpclient)
    # support the same options as unittest's command-line interface
    define('verbose', type=bool)
    define('quiet', type=bool)
    define('failfast', type=bool)
    define('catch', type=bool)
    define('buffer', type=bool)

    argv = [sys.argv[0]] + parse_command_line(sys.argv)

    if not options.exception_on_interrupt:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if options.verbose is not None:
        kwargs['verbosity'] = 2
    if options.quiet is not None:
        kwargs['verbosity'] = 0
    if options.failfast is not None:
        kwargs['failfast'] = True
    if options.catch is not None:
        kwargs['catchbreak'] = True
    if options.buffer is not None:
        kwargs['buffer'] = True

    if __name__ == '__main__' and len(argv) == 1:
        print >> sys.stderr, "No tests specified"
        sys.exit(1)
@@ -433,14 +515,10 @@ def main(**kwargs):
        unittest.main(defaultTest="all", argv=argv, **kwargs)
    except SystemExit, e:
        if e.code == 0:
            logging.info('PASS')
            gen_log.info('PASS')
        else:
            logging.error('FAIL')
            if not options.autoreload:
                raise
    if options.autoreload:
        import tornado.autoreload
        tornado.autoreload.wait()
            gen_log.error('FAIL')
            raise

if __name__ == '__main__':
    main()

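With the new unittest-style flags, a project test runner can stay a one-liner; a hypothetical ``runtests.py`` built on this entry point::

    # invoked as: python runtests.py --verbose --failfast myapp.tests.some_test
    import tornado.testing

    if __name__ == '__main__':
        tornado.testing.main()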
@@ -96,6 +96,102 @@ def raise_exc_info(exc_info):
|
||||
# After 2to3: raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
|
||||
|
||||
|
||||
class Configurable(object):
|
||||
"""Base class for configurable interfaces.
|
||||
|
||||
A configurable interface is an (abstract) class whose constructor
|
||||
acts as a factory function for one of its implementation subclasses.
|
||||
The implementation subclass as well as optional keyword arguments to
|
||||
its initializer can be set globally at runtime with `configure`.
|
||||
|
||||
By using the constructor as the factory method, the interface looks like
|
||||
a normal class, ``isinstance()`` works as usual, etc. This pattern
|
||||
is most useful when the choice of implementation is likely to be a
|
||||
global decision (e.g. when epoll is available, always use it instead of
|
||||
select), or when a previously-monolithic class has been split into
|
||||
specialized subclasses.
|
||||
|
||||
Configurable subclasses must define the class methods
|
||||
`configurable_base` and `configurable_default`, and use the instance
|
||||
method `initialize` instead of `__init__`.
|
||||
"""
|
||||
__impl_class = None
|
||||
__impl_kwargs = None
|
||||
|
||||
def __new__(cls, **kwargs):
|
||||
base = cls.configurable_base()
|
||||
args = {}
|
||||
if cls is base:
|
||||
impl = cls.configured_class()
|
||||
if base.__impl_kwargs:
|
||||
args.update(base.__impl_kwargs)
|
||||
else:
|
||||
impl = cls
|
||||
args.update(kwargs)
|
||||
instance = super(Configurable, cls).__new__(impl)
|
||||
# initialize vs __init__ chosen for compatiblity with AsyncHTTPClient
|
||||
# singleton magic. If we get rid of that we can switch to __init__
|
||||
# here too.
|
||||
instance.initialize(**args)
|
||||
return instance
|
||||
|
||||
@classmethod
|
||||
def configurable_base(cls):
|
||||
"""Returns the base class of a configurable hierarchy.
|
||||
|
||||
This will normally return the class in which it is defined.
|
||||
(which is *not* necessarily the same as the cls classmethod parameter).
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def configurable_default(cls):
|
||||
"""Returns the implementation class to be used if none is configured."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def initialize(self):
|
||||
"""Initialize a `Configurable` subclass instance.
|
||||
|
||||
Configurable classes should use `initialize` instead of `__init__`.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def configure(cls, impl, **kwargs):
|
||||
"""Sets the class to use when the base class is instantiated.
|
||||
|
||||
Keyword arguments will be saved and added to the arguments passed
|
||||
to the constructor. This can be used to set global defaults for
|
||||
some parameters.
|
||||
"""
|
||||
base = cls.configurable_base()
|
||||
if isinstance(impl, (unicode, bytes_type)):
|
||||
impl = import_object(impl)
|
||||
if impl is not None and not issubclass(impl, cls):
|
||||
raise ValueError("Invalid subclass of %s" % cls)
|
||||
base.__impl_class = impl
|
||||
base.__impl_kwargs = kwargs
|
||||
|
||||
@classmethod
|
||||
def configured_class(cls):
|
||||
"""Returns the currently configured class."""
|
||||
base = cls.configurable_base()
|
||||
if cls.__impl_class is None:
|
||||
base.__impl_class = cls.configurable_default()
|
||||
return base.__impl_class
|
||||
|
||||
|
||||
@classmethod
|
||||
def _save_configuration(cls):
|
||||
base = cls.configurable_base()
|
||||
return (base.__impl_class, base.__impl_kwargs)
|
||||
|
||||
@classmethod
|
||||
def _restore_configuration(cls, saved):
|
||||
base = cls.configurable_base()
|
||||
base.__impl_class = saved[0]
|
||||
base.__impl_kwargs = saved[1]
|
||||
|
||||
|
||||
def doctests():
|
||||
import doctest
|
||||
return doctest.DocTestSuite()

@@ -63,7 +63,6 @@ import hashlib
import hmac
import httplib
import itertools
import logging
import mimetypes
import os.path
import re
@@ -80,6 +79,7 @@ import uuid

from tornado import escape
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import stack_context
from tornado import template
from tornado.escape import utf8, _unicode
@@ -105,12 +105,16 @@ class RequestHandler(object):
    _template_loader_lock = threading.Lock()

    def __init__(self, application, request, **kwargs):
        super(RequestHandler, self).__init__()

        self.application = application
        self.request = request
        self._headers_written = False
        self._finished = False
        self._auto_finish = True
        self._transforms = None  # will be set in _execute
        self.path_args = None
        self.path_kwargs = None
        self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
                             application.ui_methods.iteritems())
        # UIModules are available as both `modules` and `_modules` in the
@@ -219,6 +223,8 @@ class RequestHandler(object):
        self._headers = {
            "Server": "TornadoServer/%s" % tornado.version,
            "Content-Type": "text/html; charset=UTF-8",
            "Date": datetime.datetime.utcnow().strftime(
                "%a, %d %b %Y %H:%M:%S GMT"),
        }
        self._list_headers = []
        self.set_default_headers()
@@ -227,6 +233,7 @@ class RequestHandler(object):
            self.set_header("Connection", "Keep-Alive")
        self._write_buffer = []
        self._status_code = 200
        self._reason = httplib.responses[200]

    def set_default_headers(self):
        """Override this to set HTTP headers at the beginning of the request.
@@ -238,10 +245,22 @@ class RequestHandler(object):
        """
        pass

    def set_status(self, status_code):
        """Sets the status code for our response."""
        assert status_code in httplib.responses
    def set_status(self, status_code, reason=None):
        """Sets the status code for our response.

        :arg int status_code: Response status code. If `reason` is ``None``,
            it must be present in `httplib.responses`.
        :arg string reason: Human-readable reason phrase describing the status
            code. If ``None``, it will be filled in from `httplib.responses`.
        """
        self._status_code = status_code
        if reason is not None:
            self._reason = escape.native_str(reason)
        else:
            try:
                self._reason = httplib.responses[status_code]
            except KeyError:
                raise ValueError("unknown status code %d", status_code)

    def get_status(self):
        """Returns the status code for our response."""
@@ -600,7 +619,20 @@ class RequestHandler(object):
        else:
            loader = RequestHandler._template_loaders[template_path]
        t = loader.load(template_name)
        args = dict(
        namespace = self.get_template_namespace()
        namespace.update(kwargs)
        return t.generate(**namespace)

    def get_template_namespace(self):
        """Returns a dictionary to be used as the default template namespace.

        May be overridden by subclasses to add or modify values.

        The results of this method will be combined with additional
        defaults in the `tornado.template` module and keyword arguments
        to `render` or `render_string`.
        """
        namespace = dict(
            handler=self,
            request=self.request,
            current_user=self.current_user,
@@ -610,11 +642,17 @@ class RequestHandler(object):
            xsrf_form_html=self.xsrf_form_html,
            reverse_url=self.reverse_url
        )
        args.update(self.ui)
        args.update(kwargs)
        return t.generate(**args)
        namespace.update(self.ui)
        return namespace
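
Because the namespace is now built in an overridable method, a subclass can
inject site-wide template variables once instead of passing them to every
``render`` call. A minimal sketch (``BaseHandler`` is hypothetical):

class BaseHandler(RequestHandler):
    def get_template_namespace(self):
        namespace = super(BaseHandler, self).get_template_namespace()
        namespace["site_name"] = "Example"  # available in every template
        return namespace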

    def create_template_loader(self, template_path):
        """Returns a new template loader for the given path.

        May be overridden by subclasses.  By default returns a
        directory-based loader on the given path, using the
        ``autoescape`` application setting.  If a ``template_loader``
        application setting is supplied, uses that instead.
        """
        settings = self.application.settings
        if "template_loader" in settings:
            return settings["template_loader"]
@@ -715,16 +753,22 @@ class RequestHandler(object):
        Additional keyword arguments are passed through to `write_error`.
        """
        if self._headers_written:
            logging.error("Cannot send error response after headers written")
            gen_log.error("Cannot send error response after headers written")
            if not self._finished:
                self.finish()
            return
        self.clear()
        self.set_status(status_code)

        reason = None
        if 'exc_info' in kwargs:
            exception = kwargs['exc_info'][1]
            if isinstance(exception, HTTPError) and exception.reason:
                reason = exception.reason
        self.set_status(status_code, reason=reason)
        try:
            self.write_error(status_code, **kwargs)
        except Exception:
            logging.error("Uncaught exception in write_error", exc_info=True)
            app_log.error("Uncaught exception in write_error", exc_info=True)
        if not self._finished:
            self.finish()

@@ -734,10 +778,11 @@ class RequestHandler(object):
        ``write_error`` may call `write`, `render`, `set_header`, etc
        to produce output as usual.

        If this error was caused by an uncaught exception, an ``exc_info``
        triple will be available as ``kwargs["exc_info"]``. Note that this
        exception may not be the "current" exception for purposes of
        methods like ``sys.exc_info()`` or ``traceback.format_exc``.
        If this error was caused by an uncaught exception (including
        HTTPError), an ``exc_info`` triple will be available as
        ``kwargs["exc_info"]``. Note that this exception may not be
        the "current" exception for purposes of methods like
        ``sys.exc_info()`` or ``traceback.format_exc``.

        For historical reasons, if a method ``get_error_html`` exists,
        it will be used instead of the default ``write_error`` implementation.
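
An override that uses the ``exc_info`` triple might look like the sketch below
(hypothetical handler; the traceback is formatted from ``kwargs`` rather than
``sys.exc_info()``, for exactly the reason given above):

import traceback

class DebugErrorHandler(RequestHandler):
    def write_error(self, status_code, **kwargs):
        if self.settings.get("debug") and "exc_info" in kwargs:
            self.set_header("Content-Type", "text/plain")
            for line in traceback.format_exception(*kwargs["exc_info"]):
                self.write(line)
            self.finish()
        else:
            self.finish("%d: %s" % (status_code, self._reason))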
@@ -768,7 +813,7 @@ class RequestHandler(object):
            self.finish("<html><title>%(code)d: %(message)s</title>"
                        "<body>%(code)d: %(message)s</body></html>" % {
                            "code": status_code,
                            "message": httplib.responses[status_code],
                            "message": self._reason,
                        })

    @property
@@ -964,7 +1009,7 @@ class RequestHandler(object):
                return callback(*args, **kwargs)
            except Exception, e:
                if self._headers_written:
                    logging.error("Exception after headers written",
                    app_log.error("Exception after headers written",
                                  exc_info=True)
                else:
                    self._handle_request_exception(e)
@@ -1008,6 +1053,9 @@ class RequestHandler(object):
        try:
            if self.request.method not in self.SUPPORTED_METHODS:
                raise HTTPError(405)
            self.path_args = [self.decode_argument(arg) for arg in args]
            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                    for (k, v) in kwargs.iteritems())
            # If XSRF cookies are turned on, reject form submissions without
            # the proper cookie
            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
@@ -1015,19 +1063,18 @@ class RequestHandler(object):
                self.check_xsrf_cookie()
            self.prepare()
            if not self._finished:
                args = [self.decode_argument(arg) for arg in args]
                kwargs = dict((k, self.decode_argument(v, name=k))
                              for (k, v) in kwargs.iteritems())
                getattr(self, self.request.method.lower())(*args, **kwargs)
                getattr(self, self.request.method.lower())(
                    *self.path_args, **self.path_kwargs)
                if self._auto_finish and not self._finished:
                    self.finish()
        except Exception, e:
            self._handle_request_exception(e)

    def _generate_headers(self):
        reason = self._reason
        lines = [utf8(self.request.version + " " +
                      str(self._status_code) +
                      " " + httplib.responses[self._status_code])]
                      " " + reason)]
        lines.extend([(utf8(n) + b(": ") + utf8(v)) for n, v in
                      itertools.chain(self._headers.iteritems(), self._list_headers)])
        if hasattr(self, "_new_cookie"):
@@ -1053,14 +1100,14 @@ class RequestHandler(object):
        if e.log_message:
            format = "%d %s: " + e.log_message
            args = [e.status_code, self._request_summary()] + list(e.args)
            logging.warning(format, *args)
            if e.status_code not in httplib.responses:
                logging.error("Bad HTTP status code: %d", e.status_code)
            gen_log.warning(format, *args)
            if e.status_code not in httplib.responses and not e.reason:
                gen_log.error("Bad HTTP status code: %d", e.status_code)
                self.send_error(500, exc_info=sys.exc_info())
            else:
                self.send_error(e.status_code, exc_info=sys.exc_info())
        else:
            logging.error("Uncaught exception %s\n%r", self._request_summary(),
            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                          self.request, exc_info=True)
            self.send_error(500, exc_info=sys.exc_info())

@@ -1205,18 +1252,6 @@ class Application(object):
    and we will serve /favicon.ico and /robots.txt from the same directory.
    A custom subclass of StaticFileHandler can be specified with the
    static_handler_class setting.

    .. attribute:: settings

       Additional keyword arguments passed to the constructor are saved in the
       `settings` dictionary, and are often referred to in documentation as
       "application settings".

    .. attribute:: debug

       If `True` the application runs in debug mode, described in
       :ref:`debug-mode`.  This is an application setting in the `settings`
       dictionary, so handlers can access it.
    """
    def __init__(self, handlers=None, default_host="", transforms=None,
                 wsgi=False, **settings):
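
The removed ``.. attribute::`` blocks described behaviour that still holds:
constructor keyword arguments become the ``settings`` dictionary. A minimal
sketch (``MainHandler`` is hypothetical):

application = Application([
    (r"/", MainHandler),
], debug=True, cookie_secret="change-me")
# handlers can read these back, e.g. self.settings["debug"]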
@@ -1319,7 +1354,7 @@ class Application(object):
                handlers.append(spec)
                if spec.name:
                    if spec.name in self.named_handlers:
                        logging.warning(
                        app_log.warning(
                            "Multiple handlers named %s; replacing previous value",
                            spec.name)
                    self.named_handlers[spec.name] = spec
@@ -1443,26 +1478,40 @@ class Application(object):
            self.settings["log_function"](handler)
            return
        if handler.get_status() < 400:
            log_method = logging.info
            log_method = access_log.info
        elif handler.get_status() < 500:
            log_method = logging.warning
            log_method = access_log.warning
        else:
            log_method = logging.error
            log_method = access_log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)
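
Because a ``log_function`` setting short-circuits the default, an application
can keep single-logger behaviour or add its own fields. A hedged sketch
(assumes a ``handlers`` list as elsewhere in these docs):

def log_request(handler):
    # send every request to access_log regardless of status code
    access_log.info("%d %s %.2fms", handler.get_status(),
                    handler._request_summary(),
                    1000.0 * handler.request.request_time())

application = Application(handlers, log_function=log_request)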


class HTTPError(Exception):
    """An exception that will turn into an HTTP error response."""
    def __init__(self, status_code, log_message=None, *args):
    """An exception that will turn into an HTTP error response.

    :arg int status_code: HTTP status code.  Must be listed in
        `httplib.responses` unless the ``reason`` keyword argument is given.
    :arg string log_message: Message to be written to the log for this error
        (will not be shown to the user unless the `Application` is in debug
        mode).  May contain ``%s``-style placeholders, which will be filled
        in with remaining positional parameters.
    :arg string reason: Keyword-only argument.  The HTTP "reason" phrase
        to pass in the status line along with ``status_code``.  Normally
        determined automatically from ``status_code``, but can be supplied
        to use a non-standard numeric code.
    """
    def __init__(self, status_code, log_message=None, *args, **kwargs):
        self.status_code = status_code
        self.log_message = log_message
        self.args = args
        self.reason = kwargs.get('reason', None)

    def __str__(self):
        message = "HTTP %d: %s" % (
            self.status_code, httplib.responses[self.status_code])
            self.status_code,
            self.reason or httplib.responses.get(self.status_code, 'Unknown'))
        if self.log_message:
            return message + " (" + (self.log_message % self.args) + ")"
        else:
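
With the ``reason`` keyword a caller can raise a non-standard code without
touching ``httplib.responses``; a hypothetical usage (``url`` stands in for a
real value):

raise HTTPError(599, "fetch of %s failed", url,
                reason="Network Connect Timeout")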
@@ -1477,6 +1526,12 @@ class ErrorHandler(RequestHandler):
    def prepare(self):
        raise HTTPError(self._status_code)

    def check_xsrf_cookie(self):
        # POSTs to an ErrorHandler don't actually have side effects,
        # so we don't need to check the xsrf token.  This allows POSTs
        # to the wrong url to return a 404 instead of 403.
        pass


class RedirectHandler(RequestHandler):
    """Redirects the client to the given URL for all GET requests.
@@ -1563,11 +1618,9 @@ class StaticFileHandler(RequestHandler):
        cache_time = self.get_cache_time(path, modified, mime_type)

        if cache_time > 0:
            self.set_header("Expires", datetime.datetime.utcnow() + \
            self.set_header("Expires", datetime.datetime.utcnow() +
                            datetime.timedelta(seconds=cache_time))
            self.set_header("Cache-Control", "max-age=" + str(cache_time))
        else:
            self.set_header("Cache-Control", "public")

        self.set_extra_headers(path)

@@ -1583,9 +1636,6 @@ class StaticFileHandler(RequestHandler):

        with open(abspath, "rb") as file:
            data = file.read()
            hasher = hashlib.sha1()
            hasher.update(data)
            self.set_header("Etag", '"%s"' % hasher.hexdigest())
            if include_body:
                self.write(data)
            else:
@@ -1646,7 +1696,7 @@ class StaticFileHandler(RequestHandler):
                    hashes[abs_path] = hashlib.md5(f.read()).hexdigest()
                    f.close()
                except Exception:
                    logging.error("Could not open static file %r", path)
                    gen_log.error("Could not open static file %r", path)
                    hashes[abs_path] = None
            hsh = hashes.get(abs_path)
            if hsh:
@@ -1721,6 +1771,10 @@ class GZipContentEncoding(OutputTransform):
            "gzip" in request.headers.get("Accept-Encoding", "")

    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        if 'Vary' in headers:
            headers['Vary'] += b(', Accept-Encoding')
        else:
            headers['Vary'] = b('Accept-Encoding')
        if self._gzipping:
            ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
            self._gzipping = (ctype in self.CONTENT_TYPES) and \
@@ -1956,6 +2010,11 @@ class URLSpec(object):
        self.name = name
        self._path, self._group_count = self._find_groups()

    def __repr__(self):
        return '%s(%r, %s, kwargs=%r, name=%r)' % \
            (self.__class__.__name__, self.regex.pattern,
             self.handler_class, self.kwargs, self.name)

    def _find_groups(self):
        """Returns a tuple (reverse string, group count) for a url.

@@ -2001,17 +2060,20 @@ class URLSpec(object):
url = URLSpec


def _time_independent_equals(a, b):
    if len(a) != len(b):
        return False
    result = 0
    if type(a[0]) is int:  # python3 byte strings
        for x, y in zip(a, b):
            result |= x ^ y
    else:  # python2
        for x, y in zip(a, b):
            result |= ord(x) ^ ord(y)
    return result == 0
if hasattr(hmac, 'compare_digest'):  # python 3.3
    _time_independent_equals = hmac.compare_digest
else:
    def _time_independent_equals(a, b):
        if len(a) != len(b):
            return False
        result = 0
        if type(a[0]) is int:  # python3 byte strings
            for x, y in zip(a, b):
                result |= x ^ y
        else:  # python2
            for x, y in zip(a, b):
                result |= ord(x) ^ ord(y)
        return result == 0
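
Both branches above touch every byte before answering, so the running time
depends only on the input length; a naive ``==`` returns at the first
mismatch, leaking how many leading bytes of a forged signature were correct.
A usage sketch (not Tornado API; ``hmac`` and ``hashlib`` are already imported
in this module):

def check_signature(secret, payload, provided_hexdigest):
    expected = hmac.new(secret, payload, hashlib.sha1).hexdigest()
    return _time_independent_equals(expected, provided_hexdigest)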


def create_signed_value(secret, name, value):
@@ -2030,11 +2092,11 @@ def decode_signed_value(secret, name, value, max_age_days=31):
        return None
    signature = _create_signature(secret, name, parts[0], parts[1])
    if not _time_independent_equals(parts[2], signature):
        logging.warning("Invalid cookie signature %r", value)
        gen_log.warning("Invalid cookie signature %r", value)
        return None
    timestamp = int(parts[1])
    if timestamp < time.time() - max_age_days * 86400:
        logging.warning("Expired cookie %r", value)
        gen_log.warning("Expired cookie %r", value)
        return None
    if timestamp > time.time() + 31 * 86400:
        # _cookie_signature does not hash a delimiter between the
@@ -2042,10 +2104,10 @@ def decode_signed_value(secret, name, value, max_age_days=31):
        # digits from the payload to the timestamp without altering the
        # signature.  For backwards compatibility, sanity-check timestamp
        # here instead of modifying _cookie_signature.
        logging.warning("Cookie timestamp in future; possible tampering %r", value)
        gen_log.warning("Cookie timestamp in future; possible tampering %r", value)
        return None
    if parts[1].startswith(b("0")):
        logging.warning("Tampered cookie %r", value)
        gen_log.warning("Tampered cookie %r", value)
        return None
    try:
        return base64.b64decode(parts[0])
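
The timestamp sanity checks above exist because, as the comments note, the
signature covers the concatenated fields without a delimiter. An illustration
(not Tornado code) of why shifting digits between payload and timestamp is
invisible to the signature alone:

# the same bytes get signed either way; only the split point differs
assert b"abc1" + b"2300000000" == b"abc12" + b"300000000"
# left:  value "abc1",  timestamp 2300000000 (far future)
# right: value "abc12", timestamp 300000000  (plausible past)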

@@ -23,13 +23,13 @@ from __future__ import absolute_import, division, with_statement

import array
import functools
import hashlib
import logging
import struct
import time
import base64
import tornado.escape
import tornado.web

from tornado.log import gen_log, app_log
from tornado.util import bytes_type, b


@@ -172,6 +172,14 @@ class WebSocketHandler(tornado.web.RequestHandler):
        """
        raise NotImplementedError

    def ping(self, data):
        """Send ping frame to the remote end."""
        self.ws_connection.write_ping(data)

    def on_pong(self, data):
        """Invoked when the response to a ping frame is received."""
        pass

    def on_close(self):
        """Invoked when the WebSocket is closed."""
        pass
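
With ``ping`` and ``on_pong`` exposed on the handler, application code can run
its own keepalive. A minimal sketch (handler name hypothetical; only the
protocol-13 transport supports pings, as ``write_ping`` below shows):

class EchoHandler(WebSocketHandler):
    def open(self):
        self.ping(b("keepalive"))  # server-initiated ping

    def on_pong(self, data):
        gen_log.debug("pong received: %r", data)

    def on_message(self, message):
        self.write_message(message)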
@@ -257,7 +265,7 @@ class WebSocketProtocol(object):
            try:
                return callback(*args, **kwargs)
            except Exception:
                logging.error("Uncaught exception in %s",
                app_log.error("Uncaught exception in %s",
                              self.request.path, exc_info=True)
                self._abort()
        return wrapper
@@ -289,7 +297,7 @@ class WebSocketProtocol76(WebSocketProtocol):
        try:
            self._handle_websocket_headers()
        except ValueError:
            logging.debug("Malformed WebSocket request received")
            gen_log.debug("Malformed WebSocket request received")
            self._abort()
            return

@@ -344,7 +352,7 @@ class WebSocketProtocol76(WebSocketProtocol):
        try:
            challenge_response = self.challenge_response(challenge)
        except ValueError:
            logging.debug("Malformed key data in WebSocket request")
            gen_log.debug("Malformed key data in WebSocket request")
            self._abort()
            return
        self._write_response(challenge_response)
@@ -420,6 +428,10 @@ class WebSocketProtocol76(WebSocketProtocol):
        assert isinstance(message, bytes_type)
        self.stream.write(b("\x00") + message + b("\xff"))

    def write_ping(self, data):
        """Send ping frame."""
        raise ValueError("Ping messages not supported by this version of websockets")

    def close(self):
        """Closes the WebSocket connection."""
        if not self.server_terminated:
@@ -457,7 +469,7 @@ class WebSocketProtocol13(WebSocketProtocol):
            self._handle_websocket_headers()
            self._accept_connection()
        except ValueError:
            logging.debug("Malformed WebSocket request received")
            gen_log.debug("Malformed WebSocket request received")
            self._abort()
            return

@@ -525,6 +537,11 @@ class WebSocketProtocol13(WebSocketProtocol):
        assert isinstance(message, bytes_type)
        self._write_frame(True, opcode, message)

    def write_ping(self, data):
        """Send ping frame."""
        assert isinstance(data, bytes_type)
        self._write_frame(True, 0x9, data)

    def _receive_frame(self):
        self.stream.read_bytes(2, self._on_frame_start)

@@ -632,7 +649,7 @@ class WebSocketProtocol13(WebSocketProtocol):
            self._write_frame(True, 0xA, data)
        elif opcode == 0xA:
            # Pong
            pass
            self.async_callback(self.handler.on_pong)(data)
        else:
            self._abort()

@@ -651,4 +668,4 @@ class WebSocketProtocol13(WebSocketProtocol):
            # Give the client a few seconds to complete a clean shutdown,
            # otherwise just close the connection.
            self._waiting = self.stream.io_loop.add_timeout(
                time.time() + 5, self._abort)
                self.stream.io_loop.time() + 5, self._abort)

@@ -33,7 +33,6 @@ from __future__ import absolute_import, division, with_statement

import Cookie
import httplib
import logging
import sys
import time
import tornado
@@ -41,6 +40,7 @@ import urllib

from tornado import escape
from tornado import httputil
from tornado.log import access_log
from tornado import web
from tornado.escape import native_str, utf8, parse_qs_bytes
from tornado.util import b, bytes_type
@@ -114,8 +114,8 @@ class WSGIApplication(web.Application):
    def __call__(self, environ, start_response):
        handler = web.Application.__call__(self, HTTPRequest(environ))
        assert handler._finished
        status = str(handler._status_code) + " " + \
            httplib.responses[handler._status_code]
        reason = handler._reason
        status = str(handler._status_code) + " " + reason
        headers = handler._headers.items() + handler._list_headers
        if hasattr(handler, "_new_cookie"):
            for cookie in handler._new_cookie.values():
@@ -137,11 +137,8 @@ class HTTPRequest(object):
        self.query = environ.get("QUERY_STRING", "")
        if self.query:
            self.uri += "?" + self.query
            arguments = parse_qs_bytes(native_str(self.query))
            for name, values in arguments.iteritems():
                values = [v for v in values if v]
                if values:
                    self.arguments[name] = values
        self.arguments = parse_qs_bytes(native_str(self.query),
                                        keep_blank_values=True)
        self.version = "HTTP/1.1"
        self.headers = httputil.HTTPHeaders()
        if environ.get("CONTENT_TYPE"):
@@ -248,10 +245,11 @@ class WSGIContainer(object):
            headers = data["headers"]
            header_set = set(k.lower() for (k, v) in headers)
            body = escape.utf8(body)
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
            if status_code != 304:
                if "content-length" not in header_set:
                    headers.append(("Content-Length", str(len(body))))
                if "content-type" not in header_set:
                    headers.append(("Content-Type", "text/html; charset=UTF-8"))
            if "server" not in header_set:
                headers.append(("Server", "TornadoServer/%s" % tornado.version))

@@ -302,11 +300,11 @@ class WSGIContainer(object):

    def _log(self, status_code, request):
        if status_code < 400:
            log_method = logging.info
            log_method = access_log.info
        elif status_code < 500:
            log_method = logging.warning
            log_method = access_log.warning
        else:
            log_method = logging.error
            log_method = access_log.error
        request_time = 1000.0 * request.request_time()
        summary = request.method + " " + request.uri + " (" + \
            request.remote_ip + ")"