Merge branch 'refs/heads/develop'
@@ -1,6 +1,5 @@
from flask.blueprints import Blueprint
from flask.helpers import url_for
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, asynchronous
from werkzeug.utils import redirect

@@ -11,7 +10,11 @@ api_nonblock = {}

class NonBlockHandler(RequestHandler):
stoppers = []

def __init__(self, application, request, **kwargs):
cls = NonBlockHandler
cls.stoppers = []
super(NonBlockHandler, self).__init__(application, request, **kwargs)

@asynchronous
def get(self, route):
@@ -1,16 +1,17 @@
from base64 import b32decode, b16encode
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.providers.base import Provider
import random
import re

log = CPLog(__name__)

class Downloader(Plugin):
class Downloader(Provider):

type = []
http_time_between_calls = 0

torrent_sources = [
'http://torrage.com/torrent/%s.torrent',

@@ -25,6 +25,7 @@ config = [{
},
{
'name': 'password',
'type': 'password',
'description': 'Default NZBGet password is <i>tegbzn6789</i>',
},
{
40
couchpotato/core/downloaders/nzbvortex/__init__.py
Normal file
@@ -0,0 +1,40 @@
from .main import NZBVortex

def start():
    return NZBVortex()

config = [{
    'name': 'nzbvortex',
    'groups': [
        {
            'tab': 'downloaders',
            'name': 'nzbvortex',
            'label': 'NZBVortex',
            'description': 'Send NZBs to your NZBVortex app.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'nzb',
                },
                {
                    'name': 'host',
                    'default': 'https://localhost:4321',
                },
                {
                    'name': 'api_key',
                    'label': 'Api Key',
                },
                {
                    'name': 'manual',
                    'default': False,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
                },
            ],
        }
    ],
}]
161
couchpotato/core/downloaders/nzbvortex/main.py
Normal file
@@ -0,0 +1,161 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from urllib2 import URLError
from uuid import uuid4
import hashlib
import httplib
import json
import socket
import ssl
import sys
import traceback
import urllib2

log = CPLog(__name__)

class NZBVortex(Downloader):

    type = ['nzb']
    api_level = None
    session_id = None

    def download(self, data = {}, movie = {}, manual = False, filedata = None):

        if self.isDisabled(manual) or not self.isCorrectType(data.get('type')) or not self.getApiLevel():
            return

        # Send the nzb
        try:
            nzb_filename = self.createFileName(data, filedata, movie)
            self.call('nzb/add', params = {'file': (ss(nzb_filename), filedata)}, multipart = True)

            return True
        except:
            log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
            return False

    def getAllDownloadStatus(self):

        if self.isDisabled(manual = False):
            return False

        raw_statuses = self.call('nzb')

        statuses = []
        for item in raw_statuses.get('nzbs', []):

            # Check status
            status = 'busy'
            if item['state'] == 20:
                status = 'completed'
            elif item['state'] in [21, 22, 24]:
                status = 'failed'

            statuses.append({
                'id': item['id'],
                'name': item['uiTitle'],
                'status': status,
                'original_status': item['state'],
                'timeleft':-1,
            })

        return statuses

    def login(self):

        nonce = self.call('auth/nonce', auth = False).get('authNonce')
        cnonce = uuid4().hex
        hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest())

        params = {
            'nonce': nonce,
            'cnonce': cnonce,
            'hash': hashed
        }

        login_data = self.call('auth/login', parameters = params, auth = False)

        # Save for later
        if login_data.get('loginResult') == 'successful':
            self.session_id = login_data.get('sessionID')
            return True

        log.error('Login failed, please check you api-key')
        return False

    def call(self, call, parameters = {}, repeat = False, auth = True, *args, **kwargs):

        # Login first
        if not self.session_id and auth:
            self.login()

        # Always add session id to request
        if self.session_id:
            parameters['sessionid'] = self.session_id

        params = tryUrlencode(parameters)

        url = cleanHost(self.conf('host')) + 'api/' + call
        url_opener = urllib2.build_opener(HTTPSHandler())

        try:
            data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs)

            if data:
                return json.loads(data)
        except URLError, e:
            if hasattr(e, 'code') and e.code == 403:
                # Try login and do again
                if not repeat:
                    self.login()
                    return self.call(call, parameters = parameters, repeat = True, *args, **kwargs)

            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
        except:
            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

        return {}

    def getApiLevel(self):

        if not self.api_level:

            url = cleanHost(self.conf('host')) + 'api/app/apilevel'
            url_opener = urllib2.build_opener(HTTPSHandler())

            try:
                data = self.urlopen(url, opener = url_opener, show_error = False)
                self.api_level = float(json.loads(data).get('apilevel'))
            except URLError, e:
                if hasattr(e, 'code') and e.code == 403:
                    log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
                else:
                    log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))

        return self.api_level


class HTTPSConnection(httplib.HTTPSConnection):
    def __init__(self, *args, **kwargs):
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def connect(self):
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if sys.version_info < (2, 6, 7):
            if hasattr(self, '_tunnel_host'):
                self.sock = sock
                self._tunnel()
        else:
            if self._tunnel_host:
                self.sock = sock
                self._tunnel()

        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version = ssl.PROTOCOL_TLSv1)

class HTTPSHandler(urllib2.HTTPSHandler):
    def https_open(self, req):
        return self.do_open(HTTPSConnection, req)
@@ -1,5 +1,5 @@
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from urllib2 import URLError
@@ -41,7 +41,7 @@ class Sabnzbd(Downloader):

try:
if params.get('mode') is 'addfile':
sab = self.urlopen(url, timeout = 60, params = {'nzbfile': (nzb_filename, filedata)}, multipart = True, show_error = False)
sab = self.urlopen(url, timeout = 60, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True, show_error = False)
else:
sab = self.urlopen(url, timeout = 60, show_error = False)
except URLError:
@@ -12,7 +12,7 @@ class Automation(Plugin):

fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12))

if not Env.get('dev'):
if Env.get('dev'):
addEvent('app.load', self.addMovies)

def addMovies(self):
@@ -98,6 +98,7 @@ class Plugin(object):

# http request
def urlopen(self, url, timeout = 30, params = None, headers = None, opener = None, multipart = False, show_error = True):
url = ss(url)

if not headers: headers = {}
if not params: params = {}
@@ -129,8 +130,11 @@ class Plugin(object):
log.info('Opening multipart url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data'))
request = urllib2.Request(url, params, headers)

cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)
if opener:
opener.add_handler(MultipartPostHandler())
else:
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)

response = opener.open(request, timeout = timeout)
else:
@@ -89,7 +89,7 @@ class Scanner(Plugin):
'()([ab])(\.....?)$' #*a.mkv
]

cp_imdb = '(\.cp\((?P<id>tt[0-9{7}]+)\))'
cp_imdb = '(.cp.(?P<id>tt[0-9{7}]+).)'

def __init__(self):
@@ -1,13 +1,13 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.providers.base import Provider
from couchpotato.environment import Env
import time

log = CPLog(__name__)

class Automation(Plugin):
class Automation(Provider):

enabled_option = 'automation_enabled'

@@ -19,6 +19,9 @@ class Automation(Plugin):

def _getMovies(self):

if self.isDisabled():
return

if not self.canCheck():
log.debug('Just checked, skipping %s', self.getName())
return []
@@ -1,8 +1,7 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import md5, tryInt
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
import xml.etree.ElementTree as XMLTree

log = CPLog(__name__)

@@ -14,32 +13,24 @@ class Bluray(Automation, RSS):

def getIMDBids(self):

if self.isDisabled():
return

movies = []

cache_key = 'bluray.%s' % md5(self.rss_url)
rss_data = self.getCache(cache_key, self.rss_url)
data = XMLTree.fromstring(rss_data)
rss_movies = self.getRSSData(self.rss_url)

if data is not None:
rss_movies = self.getElements(data, 'channel/item')
for movie in rss_movies:
name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()

for movie in rss_movies:
name = self.getTextElement(movie, "title").lower().split("blu-ray")[0].strip("(").rstrip()
year = self.getTextElement(movie, "description").split("|")[1].strip("(").strip()
if not name.find('/') == -1: # make sure it is not a double movie release
continue

if not name.find("/") == -1: # make sure it is not a double movie release
continue
if tryInt(year) < self.getMinimal('year'):
continue

if tryInt(year) < self.getMinimal('year'):
continue
imdb = self.search(name, year)

imdb = self.search(name, year)

if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])

return movies
@@ -8,7 +8,4 @@ class CP(Automation):

def getMovies(self):

if self.isDisabled():
return

return []
@@ -1,5 +1,5 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import md5, getImdb, splitString, tryInt
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
import traceback
@@ -13,9 +13,6 @@ class IMDB(Automation, RSS):

def getIMDBids(self):

if self.isDisabled():
return

movies = []

enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
@@ -29,8 +26,7 @@ class IMDB(Automation, RSS):
continue

try:
cache_key = 'imdb.rss.%s' % md5(url)
rss_data = self.getCache(cache_key, url)
rss_data = self.getHTMLData(url)
imdbs = getImdb(rss_data, multiple = True)

for imdb in imdbs:
@@ -1,9 +1,7 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import md5
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
import datetime
import xml.etree.ElementTree as XMLTree

log = CPLog(__name__)

@@ -15,25 +13,17 @@ class Kinepolis(Automation, RSS):

def getIMDBids(self):

if self.isDisabled():
return

movies = []

cache_key = 'kinepolis.%s' % md5(self.rss_url)
rss_data = self.getCache(cache_key, self.rss_url)
data = XMLTree.fromstring(rss_data)
rss_movies = self.getRSSData(self.rss_url)

if data is not None:
rss_movies = self.getElements(data, 'channel/item')
for movie in rss_movies:
name = self.getTextElement(movie, 'title')
year = datetime.datetime.now().strftime('%Y')

for movie in rss_movies:
name = self.getTextElement(movie, "title")
year = datetime.datetime.now().strftime("%Y")
imdb = self.search(name, year)

imdb = self.search(name, year)

if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])

return movies
23
couchpotato/core/providers/automation/moviemeter/__init__.py
Normal file
@@ -0,0 +1,23 @@
from .main import Moviemeter

def start():
    return Moviemeter()

config = [{
    'name': 'moviemeter',
    'groups': [
        {
            'tab': 'automation',
            'name': 'moviemeter_automation',
            'label': 'Moviemeter',
            'description': 'Imports movies from the current top 10 of moviemeter.nl. (uses minimal requirements)',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
28
couchpotato/core/providers/automation/moviemeter/main.py
Normal file
@@ -0,0 +1,28 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation

log = CPLog(__name__)


class Moviemeter(Automation, RSS):

    interval = 1800
    rss_url = 'http://www.moviemeter.nl/rss/cinema'

    def getIMDBids(self):

        movies = []

        rss_movies = self.getRSSData(self.rss_url)

        for movie in rss_movies:

            name_year = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
            imdb = self.search(name_year.get('name'), name_year.get('year'))

            if imdb and self.isMinimalMovie(imdb):
                movies.append(imdb['imdb'])

        return movies
@@ -1,11 +1,8 @@
|
||||
from couchpotato.core.event import fireEvent
|
||||
from couchpotato.core.helpers.rss import RSS
|
||||
from couchpotato.core.helpers.variable import md5
|
||||
from couchpotato.core.helpers.variable import tryInt, splitString
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.providers.automation.base import Automation
|
||||
from xml.etree.ElementTree import ParseError
|
||||
import traceback
|
||||
import xml.etree.ElementTree as XMLTree
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
@@ -16,39 +13,27 @@ class MoviesIO(Automation, RSS):
|
||||
|
||||
def getIMDBids(self):
|
||||
|
||||
if self.isDisabled():
|
||||
return
|
||||
|
||||
movies = []
|
||||
|
||||
enablers = self.conf('automation_urls_use').split(',')
|
||||
enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
|
||||
|
||||
index = -1
|
||||
for rss_url in self.conf('automation_urls').split(','):
|
||||
for rss_url in splitString(self.conf('automation_urls')):
|
||||
|
||||
index += 1
|
||||
if not enablers[index]:
|
||||
continue
|
||||
|
||||
try:
|
||||
cache_key = 'imdb.rss.%s' % md5(rss_url)
|
||||
rss_movies = self.getRSSData(rss_url, headers = {'Referer': ''})
|
||||
|
||||
rss_data = self.getCache(cache_key, rss_url, headers = {'Referer': ''})
|
||||
data = XMLTree.fromstring(rss_data)
|
||||
rss_movies = self.getElements(data, 'channel/item')
|
||||
for movie in rss_movies:
|
||||
|
||||
for movie in rss_movies:
|
||||
nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
|
||||
imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)
|
||||
|
||||
nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, "title"), single = True)
|
||||
imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)
|
||||
if not imdb:
|
||||
continue
|
||||
|
||||
if not imdb:
|
||||
continue
|
||||
|
||||
movies.append(imdb)
|
||||
except ParseError:
|
||||
log.debug('Failed loading Movies.io watchlist, probably empty: %s', (rss_url))
|
||||
except:
|
||||
log.error('Failed loading Movies.io watchlist: %s %s', (rss_url, traceback.format_exc()))
|
||||
movies.append(imdb)
|
||||
|
||||
return movies
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
from couchpotato.core.event import addEvent
|
||||
from couchpotato.core.helpers.variable import md5, sha1
|
||||
from couchpotato.core.helpers.variable import sha1
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.providers.automation.base import Automation
|
||||
import base64
|
||||
import json
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
@@ -25,9 +24,6 @@ class Trakt(Automation):
|
||||
|
||||
def getIMDBids(self):
|
||||
|
||||
if self.isDisabled():
|
||||
return
|
||||
|
||||
movies = []
|
||||
for movie in self.getWatchlist():
|
||||
movies.append(movie.get('imdb_id'))
|
||||
@@ -38,22 +34,11 @@ class Trakt(Automation):
|
||||
method = (self.urls['watchlist'] % self.conf('automation_api_key')) + self.conf('automation_username')
|
||||
return self.call(method)
|
||||
|
||||
|
||||
def call(self, method_url):
|
||||
|
||||
try:
|
||||
if self.conf('automation_password'):
|
||||
headers = {
|
||||
'Authorization': 'Basic %s' % base64.encodestring('%s:%s' % (self.conf('automation_username'), self.conf('automation_password')))[:-1]
|
||||
}
|
||||
else:
|
||||
headers = {}
|
||||
headers = {}
|
||||
if self.conf('automation_password'):
|
||||
headers['Authorization'] = 'Basic %s' % base64.encodestring('%s:%s' % (self.conf('automation_username'), self.conf('automation_password')))[:-1]
|
||||
|
||||
cache_key = 'trakt.%s' % md5(method_url)
|
||||
json_string = self.getCache(cache_key, self.urls['base'] + method_url, headers = headers)
|
||||
if json_string:
|
||||
return json.loads(json_string)
|
||||
except:
|
||||
log.error('Failed to get data from trakt, check your login.')
|
||||
|
||||
return []
|
||||
data = self.getJsonData(self.urls['base'] + method_url, headers = headers)
|
||||
return data if data else []
|
||||
|
||||
@@ -44,6 +44,34 @@ class Provider(Plugin):
|
||||
|
||||
return self.is_available.get(host, False)
|
||||
|
||||
def getJsonData(self, url, **kwargs):
|
||||
|
||||
data = self.getCache(md5(url), url, **kwargs)
|
||||
|
||||
if data:
|
||||
try:
|
||||
return json.loads(data)
|
||||
except:
|
||||
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
|
||||
|
||||
return []
|
||||
|
||||
def getRSSData(self, url, **kwargs):
|
||||
|
||||
data = self.getCache(md5(url), url, **kwargs)
|
||||
|
||||
if data:
|
||||
try:
|
||||
data = XMLTree.fromstring(data)
|
||||
return self.getElements(data, 'channel/item')
|
||||
except:
|
||||
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
|
||||
|
||||
return []
|
||||
|
||||
def getHTMLData(self, url, **kwargs):
|
||||
return self.getCache(md5(url), url, **kwargs)
|
||||
|
||||
|
||||
class YarrProvider(Provider):
|
||||
|
||||
@@ -106,11 +134,11 @@ class YarrProvider(Provider):
|
||||
return []
|
||||
|
||||
# Create result container
|
||||
imdb_result = hasattr(self, '_search')
|
||||
results = ResultList(self, movie, quality, imdb_result = imdb_result)
|
||||
imdb_results = hasattr(self, '_search')
|
||||
results = ResultList(self, movie, quality, imdb_results = imdb_results)
|
||||
|
||||
# Do search based on imdb id
|
||||
if imdb_result:
|
||||
if imdb_results:
|
||||
self._search(movie, quality, results)
|
||||
# Search possible titles
|
||||
else:
|
||||
@@ -165,34 +193,6 @@ class YarrProvider(Provider):
|
||||
|
||||
return [self.cat_backup_id]
|
||||
|
||||
def getJsonData(self, url, **kwargs):
|
||||
|
||||
data = self.getCache(md5(url), url, **kwargs)
|
||||
|
||||
if data:
|
||||
try:
|
||||
return json.loads(data)
|
||||
except:
|
||||
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
|
||||
|
||||
return []
|
||||
|
||||
def getRSSData(self, url, **kwargs):
|
||||
|
||||
data = self.getCache(md5(url), url, **kwargs)
|
||||
|
||||
if data:
|
||||
try:
|
||||
data = XMLTree.fromstring(data)
|
||||
return self.getElements(data, 'channel/item')
|
||||
except:
|
||||
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
|
||||
|
||||
return []
|
||||
|
||||
def getHTMLData(self, url, **kwargs):
|
||||
return self.getCache(md5(url), url, **kwargs)
|
||||
|
||||
|
||||
class ResultList(list):
|
||||
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
from .main import IMDBAPI
|
||||
|
||||
def start():
|
||||
return IMDBAPI()
|
||||
|
||||
config = []
|
||||
6
couchpotato/core/providers/movie/omdbapi/__init__.py
Normal file
@@ -0,0 +1,6 @@
from .main import OMDBAPI

def start():
    return OMDBAPI()

config = []
@@ -10,11 +10,11 @@ import traceback
|
||||
log = CPLog(__name__)
|
||||
|
||||
|
||||
class IMDBAPI(MovieProvider):
|
||||
class OMDBAPI(MovieProvider):
|
||||
|
||||
urls = {
|
||||
'search': 'http://www.imdbapi.com/?%s',
|
||||
'info': 'http://www.imdbapi.com/?i=%s',
|
||||
'search': 'http://www.omdbapi.com/?%s',
|
||||
'info': 'http://www.omdbapi.com/?i=%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 0
|
||||
@@ -32,7 +32,7 @@ class IMDBAPI(MovieProvider):
|
||||
'name': q
|
||||
}
|
||||
|
||||
cache_key = 'imdbapi.cache.%s' % q
|
||||
cache_key = 'omdbapi.cache.%s' % q
|
||||
cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}), timeout = 3)
|
||||
|
||||
if cached:
|
||||
@@ -50,7 +50,7 @@ class IMDBAPI(MovieProvider):
|
||||
if not identifier:
|
||||
return {}
|
||||
|
||||
cache_key = 'imdbapi.cache.%s' % identifier
|
||||
cache_key = 'omdbapi.cache.%s' % identifier
|
||||
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3)
|
||||
|
||||
if cached:
|
||||
@@ -13,7 +13,7 @@ config = [{
|
||||
'order': 10,
|
||||
'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab providers</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \
|
||||
<a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/" target="_blank">DOGnzb.cr</a>, \
|
||||
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>',
|
||||
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a> or <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>',
|
||||
'wizard': True,
|
||||
'options': [
|
||||
{
|
||||
@@ -22,16 +22,16 @@ config = [{
|
||||
},
|
||||
{
|
||||
'name': 'use',
|
||||
'default': '0,0,0'
|
||||
'default': '0,0,0,0'
|
||||
},
|
||||
{
|
||||
'name': 'host',
|
||||
'default': 'nzb.su,dognzb.cr,nzbs.org',
|
||||
'default': 'nzb.su,dognzb.cr,nzbs.org,https://index.nzbgeek.info',
|
||||
'description': 'The hostname of your newznab provider',
|
||||
},
|
||||
{
|
||||
'name': 'api_key',
|
||||
'default': ',,',
|
||||
'default': ',,,',
|
||||
'label': 'Api Key',
|
||||
'description': 'Can be found on your profile page',
|
||||
'type': 'combined',
|
||||
|
||||
@@ -29,7 +29,7 @@ class Newznab(NZBProvider, RSS):
|
||||
def search(self, movie, quality):
|
||||
hosts = self.getHosts()
|
||||
|
||||
results = ResultList(self, movie, quality, imdb_result = True)
|
||||
results = ResultList(self, movie, quality, imdb_results = True)
|
||||
|
||||
for host in hosts:
|
||||
if self.isDisabled(host):
|
||||
|
||||
@@ -10,7 +10,7 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'subtab': 'nzb_providers',
|
||||
'name': 'nzbX',
|
||||
'description': 'Free provider, less accurate. See <a href="https://www.nzbx.co/">nzbX</a>',
|
||||
'description': 'Free provider. See <a href="https://www.nzbx.co/">nzbX</a>',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -2,6 +2,7 @@ from couchpotato.core.helpers.encoding import tryUrlencode
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.providers.nzb.base import NZBProvider
|
||||
from couchpotato.environment import Env
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
@@ -22,7 +23,7 @@ class Nzbx(NZBProvider):
|
||||
'q': movie['library']['identifier'].replace('tt', ''),
|
||||
'sf': quality.get('size_min'),
|
||||
})
|
||||
nzbs = self.getJsonData(self.urls['search'] % arguments)
|
||||
nzbs = self.getJsonData(self.urls['search'] % arguments, headers = {'User-Agent': Env.getIdentifier()})
|
||||
|
||||
for nzb in nzbs:
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ log = CPLog(__name__)
|
||||
class OMGWTFNZBs(NZBProvider, RSS):
|
||||
|
||||
urls = {
|
||||
'search': 'http://rss.omgwtfnzbs.com/rss-search.php?%s',
|
||||
'search': 'http://rss.omgwtfnzbs.org/rss-search.php?%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 #seconds
|
||||
|
||||
@@ -4,7 +4,6 @@ from couchpotato.api import api, NonBlockHandler
|
||||
from couchpotato.core.event import fireEventAsync, fireEvent
|
||||
from couchpotato.core.helpers.variable import getDataDir, tryInt
|
||||
from logging import handlers
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.web import Application, FallbackHandler
|
||||
from tornado.wsgi import WSGIContainer
|
||||
from werkzeug.contrib.cache import FileSystemCache
|
||||
@@ -231,6 +230,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
|
||||
fireEventAsync('app.load')
|
||||
|
||||
# Go go go!
|
||||
from tornado.ioloop import IOLoop
|
||||
web_container = WSGIContainer(app)
|
||||
web_container._log = _log
|
||||
loop = IOLoop.instance()
|
||||
|
||||
@@ -13,7 +13,7 @@ var ApiClass = new Class({
|
||||
return new Request[r_type](Object.merge({
|
||||
'callbackKey': 'callback_func',
|
||||
'method': 'get',
|
||||
'url': self.createUrl(type),
|
||||
'url': self.createUrl(type, {'t': randomString()}),
|
||||
}, options)).send()
|
||||
},
|
||||
|
||||
|
||||
9
init/freebsd
Normal file → Executable file
@@ -25,6 +25,9 @@
|
||||
name="couchpotato"
|
||||
rcvar=${name}_enable
|
||||
|
||||
# Required, for some reason, to find all our binaries when starting via service.
|
||||
PATH="/usr/bin:/usr/local/bin:$PATH"
|
||||
|
||||
load_rc_config ${name}
|
||||
|
||||
: ${couchpotato_enable:="NO"}
|
||||
@@ -36,9 +39,9 @@ load_rc_config ${name}
|
||||
|
||||
WGET="/usr/local/bin/wget" # You need wget for this script to safely shutdown CouchPotato.
|
||||
if [ -e "${couchpotato_conf}" ]; then
|
||||
HOST=`grep -A14 "\[core\]" "${couchpotato_conf}"|egrep "^host"|perl -wple 's/^host = (.*)$/$1/'`
|
||||
PORT=`grep -A14 "\[core\]" "${couchpotato_conf}"|egrep "^port"|perl -wple 's/^port = (.*)$/$1/'`
|
||||
CPAPI=`grep -A14 "\[core\]" "${couchpotato_conf}"|egrep "^api_key"|perl -wple 's/^api_key = (.*)$/$1/'`
|
||||
HOST=`grep -A14 "\[core\]" "${couchpotato_conf}"|awk -F" = " '/^host/ {print $2}'`
|
||||
PORT=`grep -A14 "\[core\]" "${couchpotato_conf}"|awk -F" = " '/^port/ {print $2}'`
|
||||
CPAPI=`grep -A14 "\[core\]" "${couchpotato_conf}"|awk -F" = " '/^api_key/ {print $2}'`
|
||||
fi
|
||||
|
||||
status_cmd="${name}_status"
|
||||
|
||||
@@ -96,17 +96,18 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
|
||||
pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
|
||||
}
|
||||
if event == pycurl.POLL_REMOVE:
|
||||
self.io_loop.remove_handler(fd)
|
||||
del self._fds[fd]
|
||||
if fd in self._fds:
|
||||
self.io_loop.remove_handler(fd)
|
||||
del self._fds[fd]
|
||||
else:
|
||||
ioloop_event = event_map[event]
|
||||
if fd not in self._fds:
|
||||
self._fds[fd] = ioloop_event
|
||||
self.io_loop.add_handler(fd, self._handle_events,
|
||||
ioloop_event)
|
||||
else:
|
||||
self._fds[fd] = ioloop_event
|
||||
else:
|
||||
self.io_loop.update_handler(fd, ioloop_event)
|
||||
self._fds[fd] = ioloop_event
|
||||
|
||||
def _set_timeout(self, msecs):
|
||||
"""Called by libcurl to schedule a timeout."""
|
||||
|
||||
@@ -194,7 +194,7 @@ class IOLoop(Configurable):
|
||||
def initialize(self):
|
||||
pass
|
||||
|
||||
def close(self, all_fds=False):
|
||||
def close(self, all_fds = False):
|
||||
"""Closes the IOLoop, freeing any resources used.
|
||||
|
||||
If ``all_fds`` is true, all file descriptors registered on the
|
||||
@@ -320,7 +320,7 @@ class IOLoop(Configurable):
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def add_callback(self, callback):
|
||||
def add_callback(self, callback, *args, **kwargs):
|
||||
"""Calls the given callback on the next I/O loop iteration.
|
||||
|
||||
It is safe to call this method from any thread at any time,
|
||||
@@ -335,7 +335,7 @@ class IOLoop(Configurable):
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def add_callback_from_signal(self, callback):
|
||||
def add_callback_from_signal(self, callback, *args, **kwargs):
|
||||
"""Calls the given callback on the next I/O loop iteration.
|
||||
|
||||
Safe for use from a Python signal handler; should not be used
|
||||
@@ -359,8 +359,7 @@ class IOLoop(Configurable):
|
||||
assert isinstance(future, IOLoop._FUTURE_TYPES)
|
||||
callback = stack_context.wrap(callback)
|
||||
future.add_done_callback(
|
||||
lambda future: self.add_callback(
|
||||
functools.partial(callback, future)))
|
||||
lambda future: self.add_callback(callback, future))
|
||||
|
||||
def _run_callback(self, callback):
|
||||
"""Runs a callback with error handling.
|
||||
@@ -382,7 +381,7 @@ class IOLoop(Configurable):
|
||||
The exception itself is not passed explicitly, but is available
|
||||
in sys.exc_info.
|
||||
"""
|
||||
app_log.error("Exception in callback %r", callback, exc_info=True)
|
||||
app_log.error("Exception in callback %r", callback, exc_info = True)
|
||||
|
||||
|
||||
|
||||
@@ -393,7 +392,7 @@ class PollIOLoop(IOLoop):
|
||||
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
|
||||
`tornado.platform.select.SelectIOLoop` (all platforms).
|
||||
"""
|
||||
def initialize(self, impl, time_func=None):
|
||||
def initialize(self, impl, time_func = None):
|
||||
super(PollIOLoop, self).initialize()
|
||||
self._impl = impl
|
||||
if hasattr(self._impl, 'fileno'):
|
||||
@@ -417,7 +416,7 @@ class PollIOLoop(IOLoop):
|
||||
lambda fd, events: self._waker.consume(),
|
||||
self.READ)
|
||||
|
||||
def close(self, all_fds=False):
|
||||
def close(self, all_fds = False):
|
||||
with self._callback_lock:
|
||||
self._closing = True
|
||||
self.remove_handler(self._waker.fileno())
|
||||
@@ -426,7 +425,7 @@ class PollIOLoop(IOLoop):
|
||||
try:
|
||||
os.close(fd)
|
||||
except Exception:
|
||||
gen_log.debug("error closing fd %s", fd, exc_info=True)
|
||||
gen_log.debug("error closing fd %s", fd, exc_info = True)
|
||||
self._waker.close()
|
||||
self._impl.close()
|
||||
|
||||
@@ -442,8 +441,8 @@ class PollIOLoop(IOLoop):
|
||||
self._events.pop(fd, None)
|
||||
try:
|
||||
self._impl.unregister(fd)
|
||||
except (OSError, IOError):
|
||||
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
|
||||
except Exception:
|
||||
gen_log.debug("Error deleting fd from IOLoop", exc_info = True)
|
||||
|
||||
def set_blocking_signal_threshold(self, seconds, action):
|
||||
if not hasattr(signal, "setitimer"):
|
||||
@@ -501,7 +500,7 @@ class PollIOLoop(IOLoop):
|
||||
# IOLoop is just started once at the beginning.
|
||||
signal.set_wakeup_fd(old_wakeup_fd)
|
||||
old_wakeup_fd = None
|
||||
except ValueError: # non-main thread
|
||||
except ValueError: # non-main thread
|
||||
pass
|
||||
|
||||
while True:
|
||||
@@ -569,17 +568,18 @@ class PollIOLoop(IOLoop):
|
||||
while self._events:
|
||||
fd, events = self._events.popitem()
|
||||
try:
|
||||
self._handlers[fd](fd, events)
|
||||
hdlr = self._handlers.get(fd)
|
||||
if hdlr: hdlr(fd, events)
|
||||
except (OSError, IOError), e:
|
||||
if e.args[0] == errno.EPIPE:
|
||||
# Happens when the client closes the connection
|
||||
pass
|
||||
else:
|
||||
app_log.error("Exception in I/O handler for fd %s",
|
||||
fd, exc_info=True)
|
||||
fd, exc_info = True)
|
||||
except Exception:
|
||||
app_log.error("Exception in I/O handler for fd %s",
|
||||
fd, exc_info=True)
|
||||
fd, exc_info = True)
|
||||
# reset the stopped flag so another start/stop pair can be issued
|
||||
self._stopped = False
|
||||
if self._blocking_signal_threshold is not None:
|
||||
@@ -609,12 +609,13 @@ class PollIOLoop(IOLoop):
|
||||
# collection pass whenever there are too many dead timeouts.
|
||||
timeout.callback = None
|
||||
|
||||
def add_callback(self, callback):
|
||||
def add_callback(self, callback, *args, **kwargs):
|
||||
with self._callback_lock:
|
||||
if self._closing:
|
||||
raise RuntimeError("IOLoop is closing")
|
||||
list_empty = not self._callbacks
|
||||
self._callbacks.append(stack_context.wrap(callback))
|
||||
self._callbacks.append(functools.partial(
|
||||
stack_context.wrap(callback), *args, **kwargs))
|
||||
if list_empty and thread.get_ident() != self._thread_ident:
|
||||
# If we're in the IOLoop's thread, we know it's not currently
|
||||
# polling. If we're not, and we added the first callback to an
|
||||
@@ -624,12 +625,12 @@ class PollIOLoop(IOLoop):
|
||||
# avoid it when we can.
|
||||
self._waker.wake()
|
||||
|
||||
def add_callback_from_signal(self, callback):
|
||||
def add_callback_from_signal(self, callback, *args, **kwargs):
|
||||
with stack_context.NullContext():
|
||||
if thread.get_ident() != self._thread_ident:
|
||||
# if the signal is handled on another thread, we can add
|
||||
# it normally (modulo the NullContext)
|
||||
self.add_callback(callback)
|
||||
self.add_callback(callback, *args, **kwargs)
|
||||
else:
|
||||
# If we're on the IOLoop's thread, we cannot use
|
||||
# the regular add_callback because it may deadlock on
|
||||
@@ -639,7 +640,8 @@ class PollIOLoop(IOLoop):
|
||||
# _callback_lock block in IOLoop.start, we may modify
|
||||
# either the old or new version of self._callbacks,
|
||||
# but either way will work.
|
||||
self._callbacks.append(stack_context.wrap(callback))
|
||||
self._callbacks.append(functools.partial(
|
||||
stack_context.wrap(callback), *args, **kwargs))
|
||||
|
||||
|
||||
class _Timeout(object):
|
||||
@@ -682,7 +684,7 @@ class PeriodicCallback(object):
|
||||
|
||||
`start` must be called after the PeriodicCallback is created.
|
||||
"""
|
||||
def __init__(self, callback, callback_time, io_loop=None):
|
||||
def __init__(self, callback, callback_time, io_loop = None):
|
||||
self.callback = callback
|
||||
if callback_time <= 0:
|
||||
raise ValueError("Periodic callback must have a positive callback_time")
|
||||
@@ -710,7 +712,7 @@ class PeriodicCallback(object):
|
||||
try:
|
||||
self.callback()
|
||||
except Exception:
|
||||
app_log.error("Error in periodic callback", exc_info=True)
|
||||
app_log.error("Error in periodic callback", exc_info = True)
|
||||
self._schedule_next()
|
||||
|
||||
def _schedule_next(self):
|
||||
|
||||
@@ -209,11 +209,19 @@ class BaseIOStream(object):
|
||||
"""Call the given callback when the stream is closed."""
|
||||
self._close_callback = stack_context.wrap(callback)
|
||||
|
||||
def close(self):
|
||||
"""Close this stream."""
|
||||
def close(self, exc_info=False):
|
||||
"""Close this stream.
|
||||
|
||||
If ``exc_info`` is true, set the ``error`` attribute to the current
|
||||
exception from `sys.exc_info()` (or if ``exc_info`` is a tuple,
|
||||
use that instead of `sys.exc_info`).
|
||||
"""
|
||||
if not self.closed():
|
||||
if any(sys.exc_info()):
|
||||
self.error = sys.exc_info()[1]
|
||||
if exc_info:
|
||||
if not isinstance(exc_info, tuple):
|
||||
exc_info = sys.exc_info()
|
||||
if any(exc_info):
|
||||
self.error = exc_info[1]
|
||||
if self._read_until_close:
|
||||
callback = self._read_callback
|
||||
self._read_callback = None
|
||||
@@ -285,7 +293,7 @@ class BaseIOStream(object):
|
||||
except Exception:
|
||||
gen_log.error("Uncaught exception, closing connection.",
|
||||
exc_info=True)
|
||||
self.close()
|
||||
self.close(exc_info=True)
|
||||
raise
|
||||
|
||||
def _run_callback(self, callback, *args):
|
||||
@@ -300,7 +308,7 @@ class BaseIOStream(object):
|
||||
# (It would eventually get closed when the socket object is
|
||||
# gc'd, but we don't want to rely on gc happening before we
|
||||
# run out of file descriptors)
|
||||
self.close()
|
||||
self.close(exc_info=True)
|
||||
# Re-raise the exception so that IOLoop.handle_callback_exception
|
||||
# can see it and log the error
|
||||
raise
|
||||
@@ -348,7 +356,7 @@ class BaseIOStream(object):
|
||||
self._pending_callbacks -= 1
|
||||
except Exception:
|
||||
gen_log.warning("error on read", exc_info=True)
|
||||
self.close()
|
||||
self.close(exc_info=True)
|
||||
return
|
||||
if self._read_from_buffer():
|
||||
return
|
||||
@@ -397,9 +405,9 @@ class BaseIOStream(object):
|
||||
# Treat ECONNRESET as a connection close rather than
|
||||
# an error to minimize log spam (the exception will
|
||||
# be available on self.error for apps that care).
|
||||
self.close()
|
||||
self.close(exc_info=True)
|
||||
return
|
||||
self.close()
|
||||
self.close(exc_info=True)
|
||||
raise
|
||||
if chunk is None:
|
||||
return 0
|
||||
@@ -503,7 +511,7 @@ class BaseIOStream(object):
|
||||
else:
|
||||
gen_log.warning("Write error on %d: %s",
|
||||
self.fileno(), e)
|
||||
self.close()
|
||||
self.close(exc_info=True)
|
||||
return
|
||||
if not self._write_buffer and self._write_callback:
|
||||
callback = self._write_callback
|
||||
@@ -664,7 +672,7 @@ class IOStream(BaseIOStream):
|
||||
if e.args[0] not in (errno.EINPROGRESS, errno.EWOULDBLOCK):
|
||||
gen_log.warning("Connect error on fd %d: %s",
|
||||
self.socket.fileno(), e)
|
||||
self.close()
|
||||
self.close(exc_info=True)
|
||||
return
|
||||
self._connect_callback = stack_context.wrap(callback)
|
||||
self._add_io_state(self.io_loop.WRITE)
|
||||
@@ -733,7 +741,7 @@ class SSLIOStream(IOStream):
|
||||
return
|
||||
elif err.args[0] in (ssl.SSL_ERROR_EOF,
|
||||
ssl.SSL_ERROR_ZERO_RETURN):
|
||||
return self.close()
|
||||
return self.close(exc_info=True)
|
||||
elif err.args[0] == ssl.SSL_ERROR_SSL:
|
||||
try:
|
||||
peer = self.socket.getpeername()
|
||||
@@ -741,11 +749,11 @@ class SSLIOStream(IOStream):
|
||||
peer = '(not connected)'
|
||||
gen_log.warning("SSL Error on %d %s: %s",
|
||||
self.socket.fileno(), peer, err)
|
||||
return self.close()
|
||||
return self.close(exc_info=True)
|
||||
raise
|
||||
except socket.error, err:
|
||||
if err.args[0] in (errno.ECONNABORTED, errno.ECONNRESET):
|
||||
return self.close()
|
||||
return self.close(exc_info=True)
|
||||
else:
|
||||
self._ssl_accepting = False
|
||||
if self._ssl_connect_callback is not None:
|
||||
@@ -842,7 +850,7 @@ class PipeIOStream(BaseIOStream):
|
||||
elif e.args[0] == errno.EBADF:
|
||||
# If the writing half of a pipe is closed, select will
|
||||
# report it as readable but reads will fail with EBADF.
|
||||
self.close()
|
||||
self.close(exc_info=True)
|
||||
return None
|
||||
else:
|
||||
raise
|
||||
|
||||
@@ -431,6 +431,8 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
|
||||
self.reactor.removeWriter(self.fds[fd])
|
||||
|
||||
def remove_handler(self, fd):
|
||||
if fd not in self.fds:
|
||||
return
|
||||
self.fds[fd].lost = True
|
||||
if self.fds[fd].reading:
|
||||
self.reactor.removeReader(self.fds[fd])
|
||||
@@ -444,6 +446,12 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
|
||||
def stop(self):
|
||||
self.reactor.crash()
|
||||
|
||||
def _run_callback(self, callback, *args, **kwargs):
|
||||
try:
|
||||
callback(*args, **kwargs)
|
||||
except Exception:
|
||||
self.handle_callback_exception(callback)
|
||||
|
||||
def add_timeout(self, deadline, callback):
|
||||
if isinstance(deadline, (int, long, float)):
|
||||
delay = max(deadline - self.time(), 0)
|
||||
@@ -451,13 +459,14 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
|
||||
delay = deadline.total_seconds()
|
||||
else:
|
||||
raise TypeError("Unsupported deadline %r")
|
||||
return self.reactor.callLater(delay, wrap(callback))
|
||||
return self.reactor.callLater(delay, self._run_callback, wrap(callback))
|
||||
|
||||
def remove_timeout(self, timeout):
|
||||
timeout.cancel()
|
||||
|
||||
def add_callback(self, callback):
|
||||
self.reactor.callFromThread(wrap(callback))
|
||||
def add_callback(self, callback, *args, **kwargs):
|
||||
self.reactor.callFromThread(self._run_callback,
|
||||
wrap(callback), *args, **kwargs)
|
||||
|
||||
def add_callback_from_signal(self, callback):
|
||||
self.add_callback(callback)
|
||||
def add_callback_from_signal(self, callback, *args, **kwargs):
|
||||
self.add_callback(callback, *args, **kwargs)
|
||||
|
||||
@@ -268,7 +268,7 @@ class Subprocess(object):
|
||||
assert ret_pid == pid
|
||||
subproc = cls._waiting.pop(pid)
|
||||
subproc.io_loop.add_callback_from_signal(
|
||||
functools.partial(subproc._set_returncode, status))
|
||||
subproc._set_returncode, status)
|
||||
|
||||
def _set_returncode(self, status):
|
||||
if os.WIFSIGNALED(status):
|
||||
|
||||
@@ -12,7 +12,6 @@ from tornado.util import b, GzipDecompressor
|
||||
|
||||
import base64
|
||||
import collections
|
||||
import contextlib
|
||||
import copy
|
||||
import functools
|
||||
import os.path
|
||||
@@ -134,7 +133,7 @@ class _HTTPConnection(object):
|
||||
self._decompressor = None
|
||||
# Timeout handle returned by IOLoop.add_timeout
|
||||
self._timeout = None
|
||||
with stack_context.StackContext(self.cleanup):
|
||||
with stack_context.ExceptionStackContext(self._handle_exception):
|
||||
self.parsed = urlparse.urlsplit(_unicode(self.request.url))
|
||||
if ssl is None and self.parsed.scheme == "https":
|
||||
raise ValueError("HTTPS requires either python2.6+ or "
|
||||
@@ -309,19 +308,24 @@ class _HTTPConnection(object):
|
||||
if self.final_callback is not None:
|
||||
final_callback = self.final_callback
|
||||
self.final_callback = None
|
||||
final_callback(response)
|
||||
self.io_loop.add_callback(final_callback, response)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def cleanup(self):
|
||||
try:
|
||||
yield
|
||||
except Exception, e:
|
||||
gen_log.warning("uncaught exception", exc_info=True)
|
||||
self._run_callback(HTTPResponse(self.request, 599, error=e,
|
||||
def _handle_exception(self, typ, value, tb):
|
||||
if self.final_callback:
|
||||
gen_log.warning("uncaught exception", exc_info=(typ, value, tb))
|
||||
self._run_callback(HTTPResponse(self.request, 599, error=value,
|
||||
request_time=self.io_loop.time() - self.start_time,
|
||||
))
|
||||
|
||||
if hasattr(self, "stream"):
|
||||
self.stream.close()
|
||||
return True
|
||||
else:
|
||||
# If our callback has already been called, we are probably
|
||||
# catching an exception that is not caused by us but rather
|
||||
# some child of our callback. Rather than drop it on the floor,
|
||||
# pass it along.
|
||||
return False
|
||||
|
||||
def _on_close(self):
|
||||
if self.final_callback is not None:
|
||||
|
||||
@@ -36,9 +36,8 @@ except ImportError:
|
||||
netutil = None
|
||||
SimpleAsyncHTTPClient = None
|
||||
from tornado.log import gen_log
|
||||
from tornado.stack_context import StackContext
|
||||
from tornado.stack_context import ExceptionStackContext
|
||||
from tornado.util import raise_exc_info
|
||||
import contextlib
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
@@ -167,13 +166,10 @@ class AsyncTestCase(unittest.TestCase):
|
||||
'''
|
||||
return IOLoop()
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _stack_context(self):
|
||||
try:
|
||||
yield
|
||||
except Exception:
|
||||
self.__failure = sys.exc_info()
|
||||
self.stop()
|
||||
def _handle_exception(self, typ, value, tb):
|
||||
self.__failure = sys.exc_info()
|
||||
self.stop()
|
||||
return True
|
||||
|
||||
def __rethrow(self):
|
||||
if self.__failure is not None:
|
||||
@@ -182,7 +178,7 @@ class AsyncTestCase(unittest.TestCase):
|
||||
raise_exc_info(failure)
|
||||
|
||||
def run(self, result=None):
|
||||
with StackContext(self._stack_context):
|
||||
with ExceptionStackContext(self._handle_exception):
|
||||
super(AsyncTestCase, self).run(result)
|
||||
# In case an exception escaped super.run or the StackContext caught
|
||||
# an exception when there wasn't a wait() to re-raise it, do so here.
|
||||
|
||||
@@ -1317,10 +1317,8 @@ class Application(object):
|
||||
def add_handlers(self, host_pattern, host_handlers):
|
||||
"""Appends the given handlers to our handler list.
|
||||
|
||||
Note that host patterns are processed sequentially in the
|
||||
order they were added, and only the first matching pattern is
|
||||
used. This means that all handlers for a given host must be
|
||||
added in a single add_handlers call.
|
||||
Host patterns are processed sequentially in the order they were
|
||||
added. All matching patterns will be considered.
|
||||
"""
|
||||
if not host_pattern.endswith("$"):
|
||||
host_pattern += "$"
|
||||
@@ -1365,15 +1363,16 @@ class Application(object):
|
||||
|
||||
def _get_host_handlers(self, request):
|
||||
host = request.host.lower().split(':')[0]
|
||||
matches = []
|
||||
for pattern, handlers in self.handlers:
|
||||
if pattern.match(host):
|
||||
return handlers
|
||||
matches.extend(handlers)
|
||||
# Look for default host if not behind load balancer (for debugging)
|
||||
if "X-Real-Ip" not in request.headers:
|
||||
if not matches and "X-Real-Ip" not in request.headers:
|
||||
for pattern, handlers in self.handlers:
|
||||
if pattern.match(self.default_host):
|
||||
return handlers
|
||||
return None
|
||||
matches.extend(handlers)
|
||||
return matches or None
|
||||
|
||||
def _load_ui_methods(self, methods):
|
||||
if type(methods) is types.ModuleType:
|
||||