diff --git a/CouchPotato.py b/CouchPotato.py index dd1e6549..00992780 100755 --- a/CouchPotato.py +++ b/CouchPotato.py @@ -89,7 +89,6 @@ class Loader(object): if self.runAsDaemon(): try: self.daemon.stop() except: pass - self.daemon.delpid() except: self.log.critical(traceback.format_exc()) diff --git a/couchpotato/core/_base/_core/main.py b/couchpotato/core/_base/_core/main.py index a496df60..f7abddec 100644 --- a/couchpotato/core/_base/_core/main.py +++ b/couchpotato/core/_base/_core/main.py @@ -5,7 +5,7 @@ from couchpotato.core.helpers.variable import cleanHost, md5 from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env -from flask import request +from tornado.ioloop import IOLoop from uuid import uuid4 import os import platform @@ -18,7 +18,7 @@ log = CPLog(__name__) class Core(Plugin): - ignore_restart = ['Core.crappyRestart', 'Core.crappyShutdown'] + ignore_restart = ['Core.restart', 'Core.shutdown', 'Updater.check'] shutdown_started = False def __init__(self): @@ -37,8 +37,8 @@ class Core(Plugin): 'desc': 'Get version.' 
}) - addEvent('app.crappy_shutdown', self.crappyShutdown) - addEvent('app.crappy_restart', self.crappyRestart) + addEvent('app.shutdown', self.shutdown) + addEvent('app.restart', self.restart) addEvent('app.load', self.launchBrowser, priority = 1) addEvent('app.base_url', self.createBaseUrl) addEvent('app.api_url', self.createApiUrl) @@ -59,34 +59,24 @@ class Core(Plugin): 'succes': True }) - def crappyShutdown(self): - if self.shutdown_started: - return - - try: - self.urlopen('%s/app.shutdown' % self.createApiUrl(), show_error = False) - return True - except: - self.initShutdown() - return False - - def crappyRestart(self): - if self.shutdown_started: - return - - try: - self.urlopen('%s/app.restart' % self.createApiUrl(), show_error = False) - return True - except: - self.initShutdown(restart = True) - return False - def shutdown(self): - self.initShutdown() + if self.shutdown_started: + return False + + def shutdown(): + self.initShutdown() + IOLoop.instance().add_callback(shutdown) + return 'shutdown' def restart(self): - self.initShutdown(restart = True) + if self.shutdown_started: + return False + + def restart(): + self.initShutdown(restart = True) + IOLoop.instance().add_callback(restart) + return 'restarting' def initShutdown(self, restart = False): @@ -121,7 +111,8 @@ class Core(Plugin): log.debug('Save to shutdown/restart') try: - request.environ.get('werkzeug.server.shutdown')() + Env.get('httpserver').stop() + IOLoop.instance().stop() except RuntimeError: pass except: diff --git a/couchpotato/core/_base/desktop/main.py b/couchpotato/core/_base/desktop/main.py index ce1ff282..dcec7050 100644 --- a/couchpotato/core/_base/desktop/main.py +++ b/couchpotato/core/_base/desktop/main.py @@ -27,7 +27,7 @@ if Env.get('desktop'): addEvent('app.after_shutdown', desktop.afterShutdown) def onClose(self, event): - return fireEvent('app.crappy_shutdown', single = True) + return fireEvent('app.shutdown', single = True) else: diff --git 
a/couchpotato/core/_base/updater/main.py b/couchpotato/core/_base/updater/main.py index 17749239..87716610 100644 --- a/couchpotato/core/_base/updater/main.py +++ b/couchpotato/core/_base/updater/main.py @@ -55,7 +55,7 @@ class Updater(Plugin): if self.updater.check(): if self.conf('automatic') and not self.updater.update_failed: if self.updater.doUpdate(): - fireEventAsync('app.crappy_restart') + fireEventAsync('app.restart') else: if self.conf('notification'): fireEvent('updater.available', message = 'A new update is available', data = self.updater.info()) @@ -338,7 +338,7 @@ class SourceUpdater(BaseUpdater): return {} -class DesktopUpdater(Plugin): +class DesktopUpdater(BaseUpdater): version = None update_failed = False @@ -350,9 +350,15 @@ class DesktopUpdater(Plugin): def doUpdate(self): try: - self.desktop.CheckForUpdate(silentUnlessUpdate = True) + def do_restart(e): + if e['status'] == 'done': + fireEventAsync('app.restart') + else: + log.error('Failed updating desktop: %s' % e['exception']) + self.update_failed = True + + self.desktop._esky.auto_update(callback = do_restart) except: - log.error('Failed updating desktop: %s' % traceback.format_exc()) self.update_failed = True return False diff --git a/couchpotato/core/downloaders/blackhole/main.py b/couchpotato/core/downloaders/blackhole/main.py index a24e71b1..c12c8c6e 100644 --- a/couchpotato/core/downloaders/blackhole/main.py +++ b/couchpotato/core/downloaders/blackhole/main.py @@ -10,7 +10,7 @@ class Blackhole(Downloader): type = ['nzb', 'torrent'] - def download(self, data = {}, movie = {}, manual = False): + def download(self, data = {}, movie = {}, manual = False, filedata = None): if self.isDisabled(manual) or (not self.isCorrectType(data.get('type')) or (not self.conf('use_for') in ['both', data.get('type')])): return @@ -19,10 +19,8 @@ class Blackhole(Downloader): log.error('No directory set for blackhole %s download.' 
% data.get('type')) else: try: - filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id')) - - if len(filedata) < 50: - log.error('No nzb available!') + if not filedata or len(filedata) < 50: + log.error('No nzb/torrent available!') return False fullPath = os.path.join(directory, self.createFileName(data, filedata, movie)) @@ -42,6 +40,6 @@ class Blackhole(Downloader): pass except: - log.debug('Failed to download file %s: %s' % (data.get('name'), traceback.format_exc())) + log.info('Failed to download file %s: %s' % (data.get('name'), traceback.format_exc())) return False return False diff --git a/couchpotato/core/downloaders/nzbget/main.py b/couchpotato/core/downloaders/nzbget/main.py index 15243ca4..904a3a4e 100644 --- a/couchpotato/core/downloaders/nzbget/main.py +++ b/couchpotato/core/downloaders/nzbget/main.py @@ -14,11 +14,15 @@ class NZBGet(Downloader): url = 'http://nzbget:%(password)s@%(host)s/xmlrpc' - def download(self, data = {}, movie = {}, manual = False): + def download(self, data = {}, movie = {}, manual = False, filedata = None): if self.isDisabled(manual) or not self.isCorrectType(data.get('type')): return + if not filedata: + log.error('Unable to get NZB file: %s' % traceback.format_exc()) + return False + log.info('Sending "%s" to NZBGet.' 
% data.get('name')) url = self.url % {'host': self.conf('host'), 'password': self.conf('password')} @@ -40,19 +44,6 @@ class NZBGet(Downloader): log.error('Protocol Error: %s' % e) return False - try: - if isfunction(data.get('download')): - filedata = data.get('download')() - if not filedata: - log.error('Failed download file: %s' % nzb_name) - return False - else: - log.info('Downloading: %s' % data.get('url')) - filedata = self.urlopen(data.get('url')) - except: - log.error('Unable to get NZB file: %s' % traceback.format_exc()) - return False - if rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip())): log.info('NZB sent successfully to NZBGet') return True diff --git a/couchpotato/core/downloaders/sabnzbd/main.py b/couchpotato/core/downloaders/sabnzbd/main.py index 5505438e..d6c5af6c 100644 --- a/couchpotato/core/downloaders/sabnzbd/main.py +++ b/couchpotato/core/downloaders/sabnzbd/main.py @@ -15,7 +15,7 @@ class Sabnzbd(Downloader): type = ['nzb'] - def download(self, data = {}, movie = {}, manual = False): + def download(self, data = {}, movie = {}, manual = False, filedata = None): if self.isDisabled(manual) or not self.isCorrectType(data.get('type')): return @@ -42,15 +42,13 @@ class Sabnzbd(Downloader): 'nzbname': self.createNzbName(data, movie), } - if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))): - nzb_file = data.get('download')(url = data.get('url'), nzb_id = data.get('id')) - - if not nzb_file or len(nzb_file) < 50: - log.error('No nzb available!') + if filedata: + if len(filedata) < 50: + log.error('No proper nzb available!') return False # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb - nzb_filename = self.createFileName(data, nzb_file, movie) + nzb_filename = self.createFileName(data, filedata, movie) params['mode'] = 'addfile' else: params['name'] = data.get('url') @@ -62,7 +60,7 @@ class Sabnzbd(Downloader): try: if params.get('mode') is 
'addfile': - data = self.urlopen(url, params = {"nzbfile": (nzb_filename, nzb_file)}, multipart = True, show_error = False) + data = self.urlopen(url, params = {"nzbfile": (nzb_filename, filedata)}, multipart = True, show_error = False) else: data = self.urlopen(url, show_error = False) except: diff --git a/couchpotato/core/downloaders/transmission/main.py b/couchpotato/core/downloaders/transmission/main.py index 4ed3e583..55e4e305 100644 --- a/couchpotato/core/downloaders/transmission/main.py +++ b/couchpotato/core/downloaders/transmission/main.py @@ -11,7 +11,7 @@ class Transmission(Downloader): type = ['torrent'] - def download(self, data = {}, movie = {}, manual = False): + def download(self, data = {}, movie = {}, manual = False, filedata = None): if self.isDisabled(manual) or not self.isCorrectType(data.get('type')): return @@ -31,8 +31,10 @@ class Transmission(Downloader): } try: + if not filedata: + log.error('Failed sending torrent to transmission, no data') + tc = transmissionrpc.Client(host[0], port = host[1], user = self.conf('username'), password = self.conf('password')) - filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id')) torrent = tc.add_torrent(b64encode(filedata), **params) # Change settings of added torrents diff --git a/couchpotato/core/helpers/variable.py b/couchpotato/core/helpers/variable.py index 29e4bf4d..9a4bdc13 100644 --- a/couchpotato/core/helpers/variable.py +++ b/couchpotato/core/helpers/variable.py @@ -2,7 +2,9 @@ from couchpotato.core.logger import CPLog import hashlib import os.path import platform +import random import re +import string log = CPLog(__name__) @@ -77,9 +79,9 @@ def cleanHost(host): return host -def getImdb(txt): +def getImdb(txt, check_inside = True): - if os.path.isfile(txt): + if check_inside and os.path.isfile(txt): output = open(txt, 'r') txt = output.read() output.close() @@ -117,3 +119,6 @@ def getTitle(library_dict): log.error('Could not get title for library item: %s' % 
library_dict) return None +def randomString(size = 8, chars = string.ascii_uppercase + string.digits): + return ''.join(random.choice(chars) for x in range(size)) + diff --git a/couchpotato/core/plugins/library/main.py b/couchpotato/core/plugins/library/main.py index 4cdc323f..347d8580 100644 --- a/couchpotato/core/plugins/library/main.py +++ b/couchpotato/core/plugins/library/main.py @@ -136,7 +136,12 @@ class LibraryPlugin(Plugin): db = get_session() library = db.query(Library).filter_by(identifier = identifier).first() - dates = library.info.get('release_date') + + if not library.info: + self.update(identifier) + dates = library.get('info', {}).get('release_dates') + else: + dates = library.info.get('release_date') if dates and dates.get('expires', 0) < time.time(): dates = fireEvent('movie.release_date', identifier = identifier, merge = True) diff --git a/couchpotato/core/plugins/renamer/__init__.py b/couchpotato/core/plugins/renamer/__init__.py index b1b53395..3fb29f27 100644 --- a/couchpotato/core/plugins/renamer/__init__.py +++ b/couchpotato/core/plugins/renamer/__init__.py @@ -17,7 +17,7 @@ rename_options = { 'audio': 'Audio (DTS)', 'group': 'Releasegroup name', 'source': 'Source media (Bluray)', - 'original': 'Original filename', + 'filename': 'Original filename', 'original_folder': 'Original foldername', }, } diff --git a/couchpotato/core/plugins/renamer/main.py b/couchpotato/core/plugins/renamer/main.py index 24b031e4..f2b98564 100644 --- a/couchpotato/core/plugins/renamer/main.py +++ b/couchpotato/core/plugins/renamer/main.py @@ -85,6 +85,7 @@ class Renamer(Plugin): movie_title = getTitle(group['library']) # Add _UNKNOWN_ if no library item is connected + unknown = False if not group['library'] or not movie_title: if group['dirname']: rename_files[group['parentdir']] = group['parentdir'].replace(group['dirname'], '_UNKNOWN_%s' % group['dirname']) @@ -94,6 +95,7 @@ class Renamer(Plugin): filename = os.path.basename(rename_me) rename_files[rename_me] = 
rename_me.replace(filename, '_UNKNOWN_%s' % filename) + unknown = True # Rename the files using the library data else: group['library'] = fireEvent('library.update', identifier = group['library']['identifier'], single = True) @@ -325,6 +327,18 @@ class Renamer(Plugin): elif not remove_leftovers: # Don't remove anything remove_files = [] + # Remove files + for src in remove_files: + + if isinstance(src, File): + src = src.path + + log.info('Removing "%s"' % src) + try: + os.remove(src) + except: + log.error('Failed removing %s: %s' % (src, traceback.format_exc())) + # Rename all files marked group['renamed_files'] = [] for src in rename_files: @@ -341,18 +355,6 @@ class Renamer(Plugin): except: log.error('Failed moving the file "%s" : %s' % (os.path.basename(src), traceback.format_exc())) - # Remove files - for src in remove_files: - - if isinstance(src, File): - src = src.path - - log.info('Removing "%s"' % src) - try: - os.remove(src) - except: - log.error('Failed removing %s: %s' % (src, traceback.format_exc())) - # Remove matching releases for release in remove_releases: log.debug('Removing release %s' % release.identifier) @@ -368,12 +370,13 @@ class Renamer(Plugin): except: log.error('Failed removing %s: %s' % (group['parentdir'], traceback.format_exc())) - # Search for trailers etc - fireEventAsync('renamer.after', group) + if not unknown: + # Search for trailers etc + fireEventAsync('renamer.after', group) - # Notify on download - download_message = 'Downloaded %s (%s)' % (movie_title, replacements['quality']) - fireEventAsync('movie.downloaded', message = download_message, data = group) + # Notify on download + download_message = 'Downloaded %s (%s)' % (movie_title, replacements['quality']) + fireEventAsync('movie.downloaded', message = download_message, data = group) # Break if CP wants to shut down if self.shuttingDown(): diff --git a/couchpotato/core/plugins/scanner/main.py b/couchpotato/core/plugins/scanner/main.py index 136a1964..e287fd33 100644 --- 
a/couchpotato/core/plugins/scanner/main.py +++ b/couchpotato/core/plugins/scanner/main.py @@ -95,6 +95,8 @@ class Scanner(Plugin): def scanFilesToLibrary(self, folder = None, files = None): + folder = os.path.normpath(folder) + groups = self.scan(folder = folder, files = files) for group in groups.itervalues(): @@ -103,6 +105,8 @@ class Scanner(Plugin): def scanFolderToLibrary(self, folder = None, newer_than = None, simple = True): + folder = os.path.normpath(folder) + if not os.path.isdir(folder): return @@ -129,6 +133,8 @@ class Scanner(Plugin): def scan(self, folder = None, files = [], simple = False): + folder = os.path.normpath(folder) + if not folder or not os.path.isdir(folder): log.error('Folder doesn\'t exists: %s' % folder) return {} @@ -448,6 +454,18 @@ class Scanner(Plugin): except: pass + # Check and see if filenames contains the imdb-id + if not imdb_id: + try: + for filetype in files: + for filetype_file in files[filetype]: + imdb_id = getImdb(filetype_file, check_inside = False) + if imdb_id: + log.debug('Found movie via imdb in filename: %s' % nfo_file) + break + except: + pass + # Check if path is already in db if not imdb_id: db = get_session() @@ -703,11 +721,12 @@ class Scanner(Plugin): def getReleaseNameYear(self, release_name, file_name = None): # Use guessit first + guess = {} if file_name: try: guess = guess_movie_info(file_name) if guess.get('title') and guess.get('year'): - return { + guess = { 'name': guess.get('title'), 'year': guess.get('year'), } @@ -718,11 +737,12 @@ class Scanner(Plugin): cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) cleaned = re.sub(self.clean, ' ', cleaned) year = self.findYear(cleaned) + cp_guess = {} if year: # Split name on year try: movie_name = cleaned.split(year).pop(0).strip() - return { + cp_guess = { 'name': movie_name, 'year': int(year), } @@ -731,11 +751,16 @@ class Scanner(Plugin): else: # Split name on multiple spaces try: movie_name = cleaned.split(' ').pop(0).strip() - return { + 
cp_guess = { 'name': movie_name, 'year': int(year), } except: pass - return {} + if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')): + return cp_guess + elif guess == {}: + return cp_guess + + return guess diff --git a/couchpotato/core/plugins/score/main.py b/couchpotato/core/plugins/score/main.py index 74fed1da..8b4fedb4 100644 --- a/couchpotato/core/plugins/score/main.py +++ b/couchpotato/core/plugins/score/main.py @@ -38,4 +38,9 @@ class Score(Plugin): # Duplicates in name score += duplicateScore(nzb['name'], getTitle(movie['library'])) + # Extra provider specific check + extra_score = nzb.get('extra_score') + if extra_score: + score += extra_score(nzb) + return score diff --git a/couchpotato/core/plugins/searcher/main.py b/couchpotato/core/plugins/searcher/main.py index 254919f0..99392ac7 100644 --- a/couchpotato/core/plugins/searcher/main.py +++ b/couchpotato/core/plugins/searcher/main.py @@ -6,6 +6,7 @@ from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.core.settings.model import Movie, Release, ReleaseInfo from couchpotato.environment import Env +from inspect import ismethod, isfunction from sqlalchemy.exc import InterfaceError import datetime import re @@ -142,9 +143,9 @@ class Searcher(Plugin): for nzb in sorted_results: downloaded = self.download(data = nzb, movie = movie) - if downloaded: + if downloaded is True: return True - else: + elif downloaded != 'try_next': break else: log.info('Better quality (%s) already available or snatched for %s' % (quality_type['quality']['label'], default_title)) @@ -161,7 +162,15 @@ class Searcher(Plugin): def download(self, data, movie, manual = False): snatched_status = fireEvent('status.get', 'snatched', single = True) - successful = fireEvent('download', data = data, movie = movie, manual = manual, single = True) + + # Download movie to temp + filedata = None + if data.get('download') and 
(ismethod(data.get('download')) or isfunction(data.get('download'))): + filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id')) + if filedata is 'try_next': + return filedata + + successful = fireEvent('download', data = data, movie = movie, manual = manual, single = True, filedata = filedata) if successful: @@ -214,15 +223,17 @@ class Searcher(Plugin): log.info('Wrong: Outside retention, age is %s, needs %s or lower: %s' % (nzb['age'], retention, nzb['name'])) return False - movie_name = simplifyString(nzb['name']) - nzb_words = re.split('\W+', movie_name) - required_words = [x.strip() for x in self.conf('required_words').split(',')] + movie_name = getTitle(movie['library']) + movie_words = re.split('\W+', simplifyString(movie_name)) + nzb_name = simplifyString(nzb['name']) + nzb_words = re.split('\W+', nzb_name) + required_words = [x.strip().lower() for x in self.conf('required_words').lower().split(',')] if self.conf('required_words') and not list(set(nzb_words) & set(required_words)): log.info("NZB doesn't contain any of the required words.") return False - ignored_words = [x.strip() for x in self.conf('ignored_words').split(',')] + ignored_words = [x.strip().lower() for x in self.conf('ignored_words').split(',')] blacklisted = list(set(nzb_words) & set(ignored_words)) if self.conf('ignored_words') and blacklisted: log.info("Wrong: '%s' blacklisted words: %s" % (nzb['name'], ", ".join(blacklisted))) @@ -230,7 +241,7 @@ class Searcher(Plugin): pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs'] for p_tag in pron_tags: - if p_tag in movie_name: + if p_tag in nzb_words and p_tag not in movie_words: log.info('Wrong: %s, probably pr0n' % (nzb['name'])) return False @@ -254,6 +265,16 @@ class Searcher(Plugin): return False + # Provider specific functions + get_more = nzb.get('get_more_info') + if get_more: + get_more(nzb) + + extra_check = nzb.get('extra_check') + if extra_check and not extra_check(nzb): + 
return False + + if imdb_results: return True @@ -277,7 +298,7 @@ class Searcher(Plugin): if self.checkNFO(nzb['name'], movie['library']['identifier']): return True - log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'" % (nzb['name'], getTitle(movie['library']), movie['library']['year'])) + log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'" % (nzb['name'], movie_name, movie['library']['year'])) return False def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = {}, single_category = False): diff --git a/couchpotato/core/providers/metadata/base.py b/couchpotato/core/providers/metadata/base.py index 0f59c9f5..d7e9e035 100644 --- a/couchpotato/core/providers/metadata/base.py +++ b/couchpotato/core/providers/metadata/base.py @@ -75,7 +75,7 @@ class MetaDataBase(Plugin): break for cur_file in data['library'].get('files', []): - if cur_file.get('type_id') is file_type.get('id'): + if cur_file.get('type_id') is file_type.get('id') and os.path.isfile(cur_file.get('path')): return cur_file.get('path') def getFanart(self, movie_info = {}, data = {}): diff --git a/couchpotato/core/providers/movie/_modifier/main.py b/couchpotato/core/providers/movie/_modifier/main.py index 60e4c274..4b2ebb66 100644 --- a/couchpotato/core/providers/movie/_modifier/main.py +++ b/couchpotato/core/providers/movie/_modifier/main.py @@ -1,10 +1,9 @@ from couchpotato import get_session from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.variable import mergeDicts +from couchpotato.core.helpers.variable import mergeDicts, randomString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.core.settings.model import Library -import time import traceback log = CPLog(__name__) @@ -23,11 +22,19 @@ class MovieResultModifier(Plugin): # Combine on imdb id for item in results: - imdb = item.get('imdb', 'random-%s' % time.time()) + random_string = randomString() + imdb = 
item.get('imdb', random_string) + imdb = imdb if imdb else random_string + if not temp.get(imdb): temp[imdb] = self.getLibraryTags(imdb) order.append(imdb) + if item.get('via_imdb'): + if order.index(imdb): + order.remove(imdb) + order.insert(0, imdb) + # Merge dicts temp[imdb] = mergeDicts(temp[imdb], item) diff --git a/couchpotato/core/providers/movie/imdbapi/main.py b/couchpotato/core/providers/movie/imdbapi/main.py index 71255e60..7456a3c9 100644 --- a/couchpotato/core/providers/movie/imdbapi/main.py +++ b/couchpotato/core/providers/movie/imdbapi/main.py @@ -82,6 +82,7 @@ class IMDBAPI(MovieProvider): year = tryInt(movie.get('Year', '')) movie_data = { + 'via_imdb': True, 'titles': [movie.get('Title')] if movie.get('Title') else [], 'original_title': movie.get('Title', ''), 'images': { @@ -109,10 +110,10 @@ class IMDBAPI(MovieProvider): def runtimeToMinutes(self, runtime_str): runtime = 0 - regex = '(\d*.?\d+).(hr|hrs|mins|min)+' + regex = '(\d*.?\d+).(h|hr|hrs|mins|min)+' matches = re.findall(regex, runtime_str) for match in matches: nr, size = match - runtime += tryInt(nr) * (60 if 'hr' in str(size) else 1) + runtime += tryInt(nr) * (60 if 'h' is str(size)[0] else 1) return runtime diff --git a/couchpotato/core/providers/movie/themoviedb/main.py b/couchpotato/core/providers/movie/themoviedb/main.py index f66f2f7a..387fcac4 100644 --- a/couchpotato/core/providers/movie/themoviedb/main.py +++ b/couchpotato/core/providers/movie/themoviedb/main.py @@ -3,7 +3,6 @@ from couchpotato.core.helpers.encoding import simplifyString, toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.providers.movie.base import MovieProvider from libs.themoviedb import tmdb -import re log = CPLog(__name__) @@ -88,6 +87,9 @@ class TheMovieDb(MovieProvider): def getInfo(self, identifier = None): + if not identifier: + return {} + cache_key = 'tmdb.cache.%s' % identifier result = self.getCache(cache_key) @@ -148,6 +150,7 @@ class TheMovieDb(MovieProvider): year = None 
movie_data = { + 'via_tmdb': True, 'id': int(movie.get('id', 0)), 'titles': [toUnicode(movie.get('name'))], 'original_title': movie.get('original_name'), diff --git a/couchpotato/core/providers/nzb/moovee/__init__.py b/couchpotato/core/providers/nzb/moovee/__init__.py deleted file mode 100644 index f2f85d18..00000000 --- a/couchpotato/core/providers/nzb/moovee/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -from .main import Moovee - -def start(): - return Moovee() - -config = [{ - 'name': 'moovee', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'name': '#alt.binaries.moovee', - 'description': 'SD movies only', - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/moovee/main.py b/couchpotato/core/providers/nzb/moovee/main.py deleted file mode 100644 index 65f118d7..00000000 --- a/couchpotato/core/providers/nzb/moovee/main.py +++ /dev/null @@ -1,66 +0,0 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from dateutil.parser import parse -import re -import time - -log = CPLog(__name__) - - -class Moovee(NZBProvider): - - urls = { - 'download': 'http://85.214.105.230/get_nzb.php?id=%s§ion=moovee', - 'search': 'http://abmoovee.allfilled.com/search.php?q=%s&Search=Search', - } - - regex = '
" not in value:
+ value = re.sub(r"([\t ]+)", " ", value)
+ value = re.sub(r"(\s*\n\s*)", "\n", value)
+
+ if value:
+ writer.write_line('_append(%r)' % escape.utf8(value), self.line)
+
+
+class ParseError(Exception):
+ """Raised for template syntax errors."""
+ pass
+
+
+class _CodeWriter(object):
+ def __init__(self, file, named_blocks, loader, current_template,
+ compress_whitespace):
+ self.file = file
+ self.named_blocks = named_blocks
+ self.loader = loader
+ self.current_template = current_template
+ self.compress_whitespace = compress_whitespace
+ self.apply_counter = 0
+ self.include_stack = []
+ self._indent = 0
+
+ def indent_size(self):
+ return self._indent
+
+ def indent(self):
+ class Indenter(object):
+ def __enter__(_):
+ self._indent += 1
+ return self
+
+ def __exit__(_, *args):
+ assert self._indent > 0
+ self._indent -= 1
+
+ return Indenter()
+
+ def include(self, template, line):
+ self.include_stack.append((self.current_template, line))
+ self.current_template = template
+
+ class IncludeTemplate(object):
+ def __enter__(_):
+ return self
+
+ def __exit__(_, *args):
+ self.current_template = self.include_stack.pop()[0]
+
+ return IncludeTemplate()
+
+ def write_line(self, line, line_number, indent=None):
+ if indent == None:
+ indent = self._indent
+ line_comment = ' # %s:%d' % (self.current_template.name, line_number)
+ if self.include_stack:
+ ancestors = ["%s:%d" % (tmpl.name, lineno)
+ for (tmpl, lineno) in self.include_stack]
+ line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
+ print >> self.file, " "*indent + line + line_comment
+
+
+class _TemplateReader(object):
+ def __init__(self, name, text):
+ self.name = name
+ self.text = text
+ self.line = 1
+ self.pos = 0
+
+ def find(self, needle, start=0, end=None):
+ assert start >= 0, start
+ pos = self.pos
+ start += pos
+ if end is None:
+ index = self.text.find(needle, start)
+ else:
+ end += pos
+ assert end >= start
+ index = self.text.find(needle, start, end)
+ if index != -1:
+ index -= pos
+ return index
+
+ def consume(self, count=None):
+ if count is None:
+ count = len(self.text) - self.pos
+ newpos = self.pos + count
+ self.line += self.text.count("\n", self.pos, newpos)
+ s = self.text[self.pos:newpos]
+ self.pos = newpos
+ return s
+
+ def remaining(self):
+ return len(self.text) - self.pos
+
+ def __len__(self):
+ return self.remaining()
+
+ def __getitem__(self, key):
+ if type(key) is slice:
+ size = len(self)
+ start, stop, step = key.indices(size)
+ if start is None: start = self.pos
+ else: start += self.pos
+ if stop is not None: stop += self.pos
+ return self.text[slice(start, stop, step)]
+ elif key < 0:
+ return self.text[key]
+ else:
+ return self.text[self.pos + key]
+
+ def __str__(self):
+ return self.text[self.pos:]
+
+
+def _format_code(code):
+ lines = code.splitlines()
+ format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
+ return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
+
+
+def _parse(reader, template, in_block=None):
+ body = _ChunkList([])
+ while True:
+ # Find next template directive
+ curly = 0
+ while True:
+ curly = reader.find("{", curly)
+ if curly == -1 or curly + 1 == reader.remaining():
+ # EOF
+ if in_block:
+ raise ParseError("Missing {%% end %%} block for %s" %
+ in_block)
+ body.chunks.append(_Text(reader.consume(), reader.line))
+ return body
+ # If the first curly brace is not the start of a special token,
+ # start searching from the character after it
+ if reader[curly + 1] not in ("{", "%", "#"):
+ curly += 1
+ continue
+ # When there are more than 2 curlies in a row, use the
+ # innermost ones. This is useful when generating languages
+ # like latex where curlies are also meaningful
+ if (curly + 2 < reader.remaining() and
+ reader[curly + 1] == '{' and reader[curly + 2] == '{'):
+ curly += 1
+ continue
+ break
+
+ # Append any text before the special token
+ if curly > 0:
+ cons = reader.consume(curly)
+ body.chunks.append(_Text(cons, reader.line))
+
+ start_brace = reader.consume(2)
+ line = reader.line
+
+ # Template directives may be escaped as "{{!" or "{%!".
+ # In this case output the braces and consume the "!".
+ # This is especially useful in conjunction with jquery templates,
+ # which also use double braces.
+ if reader.remaining() and reader[0] == "!":
+ reader.consume(1)
+ body.chunks.append(_Text(start_brace, line))
+ continue
+
+ # Comment
+ if start_brace == "{#":
+ end = reader.find("#}")
+ if end == -1:
+ raise ParseError("Missing end expression #} on line %d" % line)
+ contents = reader.consume(end).strip()
+ reader.consume(2)
+ continue
+
+ # Expression
+ if start_brace == "{{":
+ end = reader.find("}}")
+ if end == -1:
+ raise ParseError("Missing end expression }} on line %d" % line)
+ contents = reader.consume(end).strip()
+ reader.consume(2)
+ if not contents:
+ raise ParseError("Empty expression on line %d" % line)
+ body.chunks.append(_Expression(contents, line))
+ continue
+
+ # Block
+ assert start_brace == "{%", start_brace
+ end = reader.find("%}")
+ if end == -1:
+ raise ParseError("Missing end block %%} on line %d" % line)
+ contents = reader.consume(end).strip()
+ reader.consume(2)
+ if not contents:
+ raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
+
+ operator, space, suffix = contents.partition(" ")
+ suffix = suffix.strip()
+
+ # Intermediate ("else", "elif", etc) blocks
+ intermediate_blocks = {
+ "else": set(["if", "for", "while"]),
+ "elif": set(["if"]),
+ "except": set(["try"]),
+ "finally": set(["try"]),
+ }
+ allowed_parents = intermediate_blocks.get(operator)
+ if allowed_parents is not None:
+ if not in_block:
+ raise ParseError("%s outside %s block" %
+ (operator, allowed_parents))
+ if in_block not in allowed_parents:
+ raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
+ body.chunks.append(_IntermediateControlBlock(contents, line))
+ continue
+
+ # End tag
+ elif operator == "end":
+ if not in_block:
+ raise ParseError("Extra {%% end %%} block on line %d" % line)
+ return body
+
+ elif operator in ("extends", "include", "set", "import", "from",
+ "comment", "autoescape", "raw", "module"):
+ if operator == "comment":
+ continue
+ if operator == "extends":
+ suffix = suffix.strip('"').strip("'")
+ if not suffix:
+ raise ParseError("extends missing file path on line %d" % line)
+ block = _ExtendsBlock(suffix)
+ elif operator in ("import", "from"):
+ if not suffix:
+ raise ParseError("import missing statement on line %d" % line)
+ block = _Statement(contents, line)
+ elif operator == "include":
+ suffix = suffix.strip('"').strip("'")
+ if not suffix:
+ raise ParseError("include missing file path on line %d" % line)
+ block = _IncludeBlock(suffix, reader, line)
+ elif operator == "set":
+ if not suffix:
+ raise ParseError("set missing statement on line %d" % line)
+ block = _Statement(suffix, line)
+ elif operator == "autoescape":
+ fn = suffix.strip()
+ if fn == "None": fn = None
+ template.autoescape = fn
+ continue
+ elif operator == "raw":
+ block = _Expression(suffix, line, raw=True)
+ elif operator == "module":
+ block = _Module(suffix, line)
+ body.chunks.append(block)
+ continue
+
+ elif operator in ("apply", "block", "try", "if", "for", "while"):
+ # parse inner body recursively
+ block_body = _parse(reader, template, operator)
+ if operator == "apply":
+ if not suffix:
+ raise ParseError("apply missing method name on line %d" % line)
+ block = _ApplyBlock(suffix, line, block_body)
+ elif operator == "block":
+ if not suffix:
+ raise ParseError("block missing name on line %d" % line)
+ block = _NamedBlock(suffix, block_body, template, line)
+ else:
+ block = _ControlBlock(contents, line, block_body)
+ body.chunks.append(block)
+ continue
+
+ else:
+ raise ParseError("unknown operator: %r" % operator)
diff --git a/libs/tornado/testing.py b/libs/tornado/testing.py
new file mode 100644
index 00000000..b2b983dd
--- /dev/null
+++ b/libs/tornado/testing.py
@@ -0,0 +1,382 @@
+#!/usr/bin/env python
+"""Support classes for automated testing.
+
+This module contains three parts:
+
+* `AsyncTestCase`/`AsyncHTTPTestCase`: Subclasses of unittest.TestCase
+ with additional support for testing asynchronous (IOLoop-based) code.
+
+* `LogTrapTestCase`: Subclass of unittest.TestCase that discards log output
+ from tests that pass and only produces output for failing tests.
+
+* `main()`: A simple test runner (wrapper around unittest.main()) with support
+ for the tornado.autoreload module to rerun the tests when code changes.
+
+These components may be used together or independently. In particular,
+it is safe to combine AsyncTestCase and LogTrapTestCase via multiple
+inheritance. See the docstrings for each class/function below for more
+information.
+"""
+
+from __future__ import with_statement
+
+from cStringIO import StringIO
+try:
+ from tornado.httpclient import AsyncHTTPClient
+ from tornado.httpserver import HTTPServer
+ from tornado.ioloop import IOLoop
+except ImportError:
+ # These modules are not importable on app engine. Parts of this module
+ # won't work, but e.g. LogTrapTestCase and main() will.
+ AsyncHTTPClient = None
+ HTTPServer = None
+ IOLoop = None
+from tornado.stack_context import StackContext, NullContext
+import contextlib
+import logging
+import signal
+import sys
+import time
+import unittest
+
+_next_port = 10000
+def get_unused_port():
+ """Returns a (hopefully) unused port number."""
+ global _next_port
+ port = _next_port
+ _next_port = _next_port + 1
+ return port
+
+class AsyncTestCase(unittest.TestCase):
+ """TestCase subclass for testing IOLoop-based asynchronous code.
+
+ The unittest framework is synchronous, so the test must be complete
+ by the time the test method returns. This method provides the stop()
+ and wait() methods for this purpose. The test method itself must call
+ self.wait(), and asynchronous callbacks should call self.stop() to signal
+ completion.
+
+ By default, a new IOLoop is constructed for each test and is available
+ as self.io_loop. This IOLoop should be used in the construction of
+ HTTP clients/servers, etc. If the code being tested requires a
+ global IOLoop, subclasses should override get_new_ioloop to return it.
+
+ The IOLoop's start and stop methods should not be called directly.
+ Instead, use self.stop and self.wait. Arguments passed to self.stop are
+ returned from self.wait. It is possible to have multiple
+ wait/stop cycles in the same test.
+
+ Example::
+
+ # This test uses an asynchronous style similar to most async
+ # application code.
+ class MyTestCase(AsyncTestCase):
+ def test_http_fetch(self):
+ client = AsyncHTTPClient(self.io_loop)
+ client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
+ self.wait()
+
+ def handle_fetch(self, response):
+ # Test contents of response (failures and exceptions here
+ # will cause self.wait() to throw an exception and end the
+ # test).
+ # Exceptions thrown here are magically propagated to
+ # self.wait() in test_http_fetch() via stack_context.
+ self.assertIn("FriendFeed", response.body)
+ self.stop()
+
+ # This test uses the argument passing between self.stop and self.wait
+ # for a simpler, more synchronous style.
+ # This style is recommended over the preceding example because it
+ # keeps the assertions in the test method itself, and is therefore
+ # less sensitive to the subtleties of stack_context.
+ class MyTestCase2(AsyncTestCase):
+ def test_http_fetch(self):
+ client = AsyncHTTPClient(self.io_loop)
+ client.fetch("http://www.tornadoweb.org/", self.stop)
+ response = self.wait()
+ # Test contents of response
+ self.assertIn("FriendFeed", response.body)
+ """
+ def __init__(self, *args, **kwargs):
+ super(AsyncTestCase, self).__init__(*args, **kwargs)
+ self.__stopped = False
+ self.__running = False
+ self.__failure = None
+ self.__stop_args = None
+
+ def setUp(self):
+ super(AsyncTestCase, self).setUp()
+ self.io_loop = self.get_new_ioloop()
+
+ def tearDown(self):
+ if (not IOLoop.initialized() or
+ self.io_loop is not IOLoop.instance()):
+ # Try to clean up any file descriptors left open in the ioloop.
+ # This avoids leaks, especially when tests are run repeatedly
+ # in the same process with autoreload (because curl does not
+ # set FD_CLOEXEC on its file descriptors)
+ self.io_loop.close(all_fds=True)
+ super(AsyncTestCase, self).tearDown()
+
+ def get_new_ioloop(self):
+ '''Creates a new IOLoop for this test. May be overridden in
+ subclasses for tests that require a specific IOLoop (usually
+ the singleton).
+ '''
+ return IOLoop()
+
+ @contextlib.contextmanager
+ def _stack_context(self):
+ try:
+ yield
+ except Exception:
+ self.__failure = sys.exc_info()
+ self.stop()
+
+ def run(self, result=None):
+ with StackContext(self._stack_context):
+ super(AsyncTestCase, self).run(result)
+
+ def stop(self, _arg=None, **kwargs):
+ '''Stops the ioloop, causing one pending (or future) call to wait()
+ to return.
+
+ Keyword arguments or a single positional argument passed to stop() are
+ saved and will be returned by wait().
+ '''
+ assert _arg is None or not kwargs
+ self.__stop_args = kwargs or _arg
+ if self.__running:
+ self.io_loop.stop()
+ self.__running = False
+ self.__stopped = True
+
+ def wait(self, condition=None, timeout=5):
+ """Runs the IOLoop until stop is called or timeout has passed.
+
+ In the event of a timeout, an exception will be thrown.
+
+ If condition is not None, the IOLoop will be restarted after stop()
+ until condition() returns true.
+ """
+ if not self.__stopped:
+ if timeout:
+ def timeout_func():
+ try:
+ raise self.failureException(
+ 'Async operation timed out after %d seconds' %
+ timeout)
+ except Exception:
+ self.__failure = sys.exc_info()
+ self.stop()
+ self.io_loop.add_timeout(time.time() + timeout, timeout_func)
+ while True:
+ self.__running = True
+ with NullContext():
+ # Wipe out the StackContext that was established in
+ # self.run() so that all callbacks executed inside the
+ # IOLoop will re-run it.
+ self.io_loop.start()
+ if (self.__failure is not None or
+ condition is None or condition()):
+ break
+ assert self.__stopped
+ self.__stopped = False
+ if self.__failure is not None:
+ # 2to3 isn't smart enough to convert three-argument raise
+ # statements correctly in some cases.
+ if isinstance(self.__failure[1], self.__failure[0]):
+ raise self.__failure[1], None, self.__failure[2]
+ else:
+ raise self.__failure[0], self.__failure[1], self.__failure[2]
+ result = self.__stop_args
+ self.__stop_args = None
+ return result
+
+
+class AsyncHTTPTestCase(AsyncTestCase):
+ '''A test case that starts up an HTTP server.
+
+ Subclasses must override get_app(), which returns the
+ tornado.web.Application (or other HTTPServer callback) to be tested.
+ Tests will typically use the provided self.http_client to fetch
+ URLs from this server.
+
+ Example::
+
+ class MyHTTPTest(AsyncHTTPTestCase):
+ def get_app(self):
+ return Application([('/', MyHandler)...])
+
+ def test_homepage(self):
+ # The following two lines are equivalent to
+ # response = self.fetch('/')
+ # but are shown in full here to demonstrate explicit use
+ # of self.stop and self.wait.
+ self.http_client.fetch(self.get_url('/'), self.stop)
+ response = self.wait()
+ # test contents of response
+ '''
+ def setUp(self):
+ super(AsyncHTTPTestCase, self).setUp()
+ self.__port = None
+
+ self.http_client = AsyncHTTPClient(io_loop=self.io_loop)
+ self._app = self.get_app()
+ self.http_server = HTTPServer(self._app, io_loop=self.io_loop,
+ **self.get_httpserver_options())
+ self.http_server.listen(self.get_http_port(), address="127.0.0.1")
+
+ def get_app(self):
+ """Should be overridden by subclasses to return a
+ tornado.web.Application or other HTTPServer callback.
+ """
+ raise NotImplementedError()
+
+ def fetch(self, path, **kwargs):
+ """Convenience method to synchronously fetch a url.
+
+ The given path will be appended to the local server's host and port.
+ Any additional kwargs will be passed directly to
+ AsyncHTTPClient.fetch (and so could be used to pass method="POST",
+ body="...", etc).
+ """
+ self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
+ return self.wait()
+
+ def get_httpserver_options(self):
+ """May be overridden by subclasses to return additional
+ keyword arguments for HTTPServer.
+ """
+ return {}
+
+ def get_http_port(self):
+ """Returns the port used by the HTTPServer.
+
+ A new port is chosen for each test.
+ """
+ if self.__port is None:
+ self.__port = get_unused_port()
+ return self.__port
+
+ def get_url(self, path):
+ """Returns an absolute url for the given path on the test server."""
+ return 'http://localhost:%s%s' % (self.get_http_port(), path)
+
+ def tearDown(self):
+ self.http_server.stop()
+ self.http_client.close()
+ super(AsyncHTTPTestCase, self).tearDown()
+
+class LogTrapTestCase(unittest.TestCase):
+ """A test case that captures and discards all logging output
+ if the test passes.
+
+ Some libraries can produce a lot of logging output even when
+ the test succeeds, so this class can be useful to minimize the noise.
+ Simply use it as a base class for your test case. It is safe to combine
+ with AsyncTestCase via multiple inheritance
+ ("class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):")
+
+ This class assumes that only one log handler is configured and that
+ it is a StreamHandler. This is true for both logging.basicConfig
+ and the "pretty logging" configured by tornado.options.
+ """
+ def run(self, result=None):
+ logger = logging.getLogger()
+ if len(logger.handlers) > 1:
+ # Multiple handlers have been defined. It gets messy to handle
+ # this, especially since the handlers may have different
+ # formatters. Just leave the logging alone in this case.
+ super(LogTrapTestCase, self).run(result)
+ return
+ if not logger.handlers:
+ logging.basicConfig()
+ self.assertEqual(len(logger.handlers), 1)
+ handler = logger.handlers[0]
+ assert isinstance(handler, logging.StreamHandler)
+ old_stream = handler.stream
+ try:
+ handler.stream = StringIO()
+ logging.info("RUNNING TEST: " + str(self))
+ old_error_count = len(result.failures) + len(result.errors)
+ super(LogTrapTestCase, self).run(result)
+ new_error_count = len(result.failures) + len(result.errors)
+ if new_error_count != old_error_count:
+ old_stream.write(handler.stream.getvalue())
+ finally:
+ handler.stream = old_stream
+
+def main():
+ """A simple test runner.
+
+ This test runner is essentially equivalent to `unittest.main` from
+ the standard library, but adds support for tornado-style option
+ parsing and log formatting.
+
+ The easiest way to run a test is via the command line::
+
+ python -m tornado.testing tornado.test.stack_context_test
+
+ See the standard library unittest module for ways in which tests can
+ be specified.
+
+ Projects with many tests may wish to define a test script like
+ tornado/test/runtests.py. This script should define a method all()
+ which returns a test suite and then call tornado.testing.main().
+ Note that even when a test script is used, the all() test suite may
+ be overridden by naming a single test on the command line::
+
+ # Runs all tests
+ tornado/test/runtests.py
+ # Runs one test
+ tornado/test/runtests.py tornado.test.stack_context_test
+
+ """
+ from tornado.options import define, options, parse_command_line
+
+ define('autoreload', type=bool, default=False,
+ help="DEPRECATED: use tornado.autoreload.main instead")
+ define('httpclient', type=str, default=None)
+ define('exception_on_interrupt', type=bool, default=True,
+ help=("If true (default), ctrl-c raises a KeyboardInterrupt "
+ "exception. This prints a stack trace but cannot interrupt "
+ "certain operations. If false, the process is more reliably "
+ "killed, but does not print a stack trace."))
+ argv = [sys.argv[0]] + parse_command_line(sys.argv)
+
+ if options.httpclient:
+ from tornado.httpclient import AsyncHTTPClient
+ AsyncHTTPClient.configure(options.httpclient)
+
+ if not options.exception_on_interrupt:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+ if __name__ == '__main__' and len(argv) == 1:
+ print >> sys.stderr, "No tests specified"
+ sys.exit(1)
+ try:
+ # In order to be able to run tests by their fully-qualified name
+ # on the command line without importing all tests here,
+ # module must be set to None. Python 3.2's unittest.main ignores
+ # defaultTest if no module is given (it tries to do its own
+ # test discovery, which is incompatible with auto2to3), so don't
+ # set module if we're not asking for a specific test.
+ if len(argv) > 1:
+ unittest.main(module=None, argv=argv)
+ else:
+ unittest.main(defaultTest="all", argv=argv)
+ except SystemExit, e:
+ if e.code == 0:
+ logging.info('PASS')
+ else:
+ logging.error('FAIL')
+ if not options.autoreload:
+ raise
+ if options.autoreload:
+ import tornado.autoreload
+ tornado.autoreload.wait()
+
+if __name__ == '__main__':
+ main()
diff --git a/libs/tornado/util.py b/libs/tornado/util.py
new file mode 100644
index 00000000..6752401a
--- /dev/null
+++ b/libs/tornado/util.py
@@ -0,0 +1,47 @@
+"""Miscellaneous utility functions."""
+
+class ObjectDict(dict):
+ """Makes a dictionary behave like an object."""
+ def __getattr__(self, name):
+ try:
+ return self[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __setattr__(self, name, value):
+ self[name] = value
+
+
+def import_object(name):
+ """Imports an object by name.
+
+ import_object('x.y.z') is equivalent to 'from x.y import z'.
+
+ >>> import tornado.escape
+ >>> import_object('tornado.escape') is tornado.escape
+ True
+ >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
+ True
+ """
+ parts = name.split('.')
+ obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
+ return getattr(obj, parts[-1])
+
+# Fake byte literal support: In python 2.6+, you can say b"foo" to get
+# a byte literal (str in 2.x, bytes in 3.x). There's no way to do this
+# in a way that supports 2.5, though, so we need a function wrapper
+# to convert our string literals. b() should only be applied to literal
+# latin1 strings. Once we drop support for 2.5, we can remove this function
+# and just use byte literals.
+if str is unicode:
+ def b(s):
+ return s.encode('latin1')
+ bytes_type = bytes
+else:
+ def b(s):
+ return s
+ bytes_type = str
+
+def doctests():
+ import doctest
+ return doctest.DocTestSuite()
diff --git a/libs/tornado/web.py b/libs/tornado/web.py
new file mode 100644
index 00000000..76392b75
--- /dev/null
+++ b/libs/tornado/web.py
@@ -0,0 +1,1985 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The Tornado web framework looks a bit like web.py (http://webpy.org/) or
+Google's webapp (http://code.google.com/appengine/docs/python/tools/webapp/),
+but with additional tools and optimizations to take advantage of the
+Tornado non-blocking web server and tools.
+
+Here is the canonical "Hello, world" example app::
+
+ import tornado.ioloop
+ import tornado.web
+
+ class MainHandler(tornado.web.RequestHandler):
+ def get(self):
+ self.write("Hello, world")
+
+ if __name__ == "__main__":
+ application = tornado.web.Application([
+ (r"/", MainHandler),
+ ])
+ application.listen(8888)
+ tornado.ioloop.IOLoop.instance().start()
+
+See the Tornado walkthrough on http://tornadoweb.org for more details
+and a good getting started guide.
+
+Thread-safety notes
+-------------------
+
+In general, methods on RequestHandler and elsewhere in tornado are not
+thread-safe. In particular, methods such as write(), finish(), and
+flush() must only be called from the main thread. If you use multiple
+threads it is important to use IOLoop.add_callback to transfer control
+back to the main thread before finishing the request.
+"""
+
+from __future__ import with_statement
+
+import Cookie
+import base64
+import binascii
+import calendar
+import datetime
+import email.utils
+import functools
+import gzip
+import hashlib
+import hmac
+import httplib
+import itertools
+import logging
+import mimetypes
+import os.path
+import re
+import stat
+import sys
+import threading
+import time
+import tornado
+import traceback
+import types
+import urllib
+import urlparse
+import uuid
+
+from tornado import escape
+from tornado import locale
+from tornado import stack_context
+from tornado import template
+from tornado.escape import utf8, _unicode
+from tornado.util import b, bytes_type, import_object, ObjectDict
+
+try:
+ from io import BytesIO # python 3
+except ImportError:
+ from cStringIO import StringIO as BytesIO # python 2
+
+class RequestHandler(object):
+ """Subclass this class and define get() or post() to make a handler.
+
+ If you want to support more methods than the standard GET/HEAD/POST, you
+ should override the class variable SUPPORTED_METHODS in your
+ RequestHandler class.
+ """
+ SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PUT", "OPTIONS")
+
+ _template_loaders = {} # {path: template.BaseLoader}
+ _template_loader_lock = threading.Lock()
+
+ def __init__(self, application, request, **kwargs):
+ self.application = application
+ self.request = request
+ self._headers_written = False
+ self._finished = False
+ self._auto_finish = True
+ self._transforms = None # will be set in _execute
+ self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
+ application.ui_methods.iteritems())
+ # UIModules are available as both `modules` and `_modules` in the
+ # template namespace. Historically only `modules` was available
+ # but could be clobbered by user additions to the namespace.
+ # The template {% module %} directive looks in `_modules` to avoid
+ # possible conflicts.
+ self.ui["_modules"] = ObjectDict((n, self._ui_module(n, m)) for n, m in
+ application.ui_modules.iteritems())
+ self.ui["modules"] = self.ui["_modules"]
+ self.clear()
+ # Check since connection is not available in WSGI
+ if hasattr(self.request, "connection"):
+ self.request.connection.stream.set_close_callback(
+ self.on_connection_close)
+ self.initialize(**kwargs)
+
+ def initialize(self):
+ """Hook for subclass initialization.
+
+ A dictionary passed as the third argument of a url spec will be
+ supplied as keyword arguments to initialize().
+
+ Example::
+
+ class ProfileHandler(RequestHandler):
+ def initialize(self, database):
+ self.database = database
+
+ def get(self, username):
+ ...
+
+ app = Application([
+ (r'/user/(.*)', ProfileHandler, dict(database=database)),
+ ])
+ """
+ pass
+
+ @property
+ def settings(self):
+ """An alias for `self.application.settings`."""
+ return self.application.settings
+
+ def head(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def get(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def post(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def delete(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def put(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def options(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def prepare(self):
+ """Called at the beginning of a request before `get`/`post`/etc.
+
+ Override this method to perform common initialization regardless
+ of the request method.
+ """
+ pass
+
+ def on_finish(self):
+ """Called after the end of a request.
+
+ Override this method to perform cleanup, logging, etc.
+ This method is a counterpart to `prepare`. ``on_finish`` may
+ not produce any output, as it is called after the response
+ has been sent to the client.
+ """
+ pass
+
+ def on_connection_close(self):
+ """Called in async handlers if the client closed the connection.
+
+ Override this to clean up resources associated with
+ long-lived connections. Note that this method is called only if
+ the connection was closed during asynchronous processing; if you
+ need to do cleanup after every request override `on_finish`
+ instead.
+
+ Proxies may keep a connection open for a time (perhaps
+ indefinitely) after the client has gone away, so this method
+ may not be called promptly after the end user closes their
+ connection.
+ """
+ pass
+
+ def clear(self):
+ """Resets all headers and content for this response."""
+ # The performance cost of tornado.httputil.HTTPHeaders is significant
+ # (slowing down a benchmark with a trivial handler by more than 10%),
+ # and its case-normalization is not generally necessary for
+ # headers we generate on the server side, so use a plain dict
+ # and list instead.
+ self._headers = {
+ "Server": "TornadoServer/%s" % tornado.version,
+ "Content-Type": "text/html; charset=UTF-8",
+ }
+ self._list_headers = []
+ self.set_default_headers()
+ if not self.request.supports_http_1_1():
+ if self.request.headers.get("Connection") == "Keep-Alive":
+ self.set_header("Connection", "Keep-Alive")
+ self._write_buffer = []
+ self._status_code = 200
+
+ def set_default_headers(self):
+ """Override this to set HTTP headers at the beginning of the request.
+
+ For example, this is the place to set a custom ``Server`` header.
+ Note that setting such headers in the normal flow of request
+ processing may not do what you want, since headers may be reset
+ during error handling.
+ """
+ pass
+
+ def set_status(self, status_code):
+ """Sets the status code for our response."""
+ assert status_code in httplib.responses
+ self._status_code = status_code
+
+ def get_status(self):
+ """Returns the status code for our response."""
+ return self._status_code
+
+ def set_header(self, name, value):
+ """Sets the given response header name and value.
+
+ If a datetime is given, we automatically format it according to the
+ HTTP specification. If the value is not a string, we convert it to
+ a string. All header values are then encoded as UTF-8.
+ """
+ self._headers[name] = self._convert_header_value(value)
+
+ def add_header(self, name, value):
+ """Adds the given response header and value.
+
+ Unlike `set_header`, `add_header` may be called multiple times
+ to return multiple values for the same header.
+ """
+ self._list_headers.append((name, self._convert_header_value(value)))
+
+ def _convert_header_value(self, value):
+ if isinstance(value, bytes_type):
+ pass
+ elif isinstance(value, unicode):
+ value = value.encode('utf-8')
+ elif isinstance(value, (int, long)):
+ # return immediately since we know the converted value will be safe
+ return str(value)
+ elif isinstance(value, datetime.datetime):
+ t = calendar.timegm(value.utctimetuple())
+ return email.utils.formatdate(t, localtime=False, usegmt=True)
+ else:
+ raise TypeError("Unsupported header value %r" % value)
+ # If \n is allowed into the header, it is possible to inject
+ # additional headers or split the request. Also cap length to
+ # prevent obviously erroneous values.
+ if len(value) > 4000 or re.search(b(r"[\x00-\x1f]"), value):
+ raise ValueError("Unsafe header value %r", value)
+ return value
+
+
+ _ARG_DEFAULT = []
+ def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
+ """Returns the value of the argument with the given name.
+
+ If default is not provided, the argument is considered to be
+ required, and we throw an HTTP 400 exception if it is missing.
+
+ If the argument appears in the url more than once, we return the
+ last value.
+
+ The returned value is always unicode.
+ """
+ args = self.get_arguments(name, strip=strip)
+ if not args:
+ if default is self._ARG_DEFAULT:
+ raise HTTPError(400, "Missing argument %s" % name)
+ return default
+ return args[-1]
+
+ def get_arguments(self, name, strip=True):
+ """Returns a list of the arguments with the given name.
+
+ If the argument is not present, returns an empty list.
+
+ The returned values are always unicode.
+ """
+ values = []
+ for v in self.request.arguments.get(name, []):
+ v = self.decode_argument(v, name=name)
+ if isinstance(v, unicode):
+ # Get rid of any weird control chars (unless decoding gave
+ # us bytes, in which case leave it alone)
+ v = re.sub(r"[\x00-\x08\x0e-\x1f]", " ", v)
+ if strip:
+ v = v.strip()
+ values.append(v)
+ return values
+
+ def decode_argument(self, value, name=None):
+ """Decodes an argument from the request.
+
+ The argument has been percent-decoded and is now a byte string.
+ By default, this method decodes the argument as utf-8 and returns
+ a unicode string, but this may be overridden in subclasses.
+
+ This method is used as a filter for both get_argument() and for
+ values extracted from the url and passed to get()/post()/etc.
+
+ The name of the argument is provided if known, but may be None
+ (e.g. for unnamed groups in the url regex).
+ """
+ return _unicode(value)
+
+ @property
+ def cookies(self):
+ return self.request.cookies
+
+ def get_cookie(self, name, default=None):
+ """Gets the value of the cookie with the given name, else default."""
+ if self.request.cookies is not None and name in self.request.cookies:
+ return self.request.cookies[name].value
+ return default
+
+ def set_cookie(self, name, value, domain=None, expires=None, path="/",
+ expires_days=None, **kwargs):
+ """Sets the given cookie name/value with the given options.
+
+ Additional keyword arguments are set on the Cookie.Morsel
+ directly.
+ See http://docs.python.org/library/cookie.html#morsel-objects
+ for available attributes.
+ """
+ # The cookie library only accepts type str, in both python 2 and 3
+ name = escape.native_str(name)
+ value = escape.native_str(value)
+ if re.search(r"[\x00-\x20]", name + value):
+ # Don't let us accidentally inject bad stuff
+ raise ValueError("Invalid cookie %r: %r" % (name, value))
+ if not hasattr(self, "_new_cookies"):
+ self._new_cookies = []
+ new_cookie = Cookie.SimpleCookie()
+ self._new_cookies.append(new_cookie)
+ new_cookie[name] = value
+ if domain:
+ new_cookie[name]["domain"] = domain
+ if expires_days is not None and not expires:
+ expires = datetime.datetime.utcnow() + datetime.timedelta(
+ days=expires_days)
+ if expires:
+ timestamp = calendar.timegm(expires.utctimetuple())
+ new_cookie[name]["expires"] = email.utils.formatdate(
+ timestamp, localtime=False, usegmt=True)
+ if path:
+ new_cookie[name]["path"] = path
+ for k, v in kwargs.iteritems():
+ if k == 'max_age': k = 'max-age'
+ new_cookie[name][k] = v
+
+ def clear_cookie(self, name, path="/", domain=None):
+ """Deletes the cookie with the given name."""
+ expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
+ self.set_cookie(name, value="", path=path, expires=expires,
+ domain=domain)
+
+ def clear_all_cookies(self):
+ """Deletes all the cookies the user sent with this request."""
+ for name in self.request.cookies.iterkeys():
+ self.clear_cookie(name)
+
+ def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
+ """Signs and timestamps a cookie so it cannot be forged.
+
+ You must specify the ``cookie_secret`` setting in your Application
+ to use this method. It should be a long, random sequence of bytes
+ to be used as the HMAC secret for the signature.
+
+ To read a cookie set with this method, use `get_secure_cookie()`.
+
+ Note that the ``expires_days`` parameter sets the lifetime of the
+ cookie in the browser, but is independent of the ``max_age_days``
+ parameter to `get_secure_cookie`.
+ """
+ self.set_cookie(name, self.create_signed_value(name, value),
+ expires_days=expires_days, **kwargs)
+
+ def create_signed_value(self, name, value):
+ """Signs and timestamps a string so it cannot be forged.
+
+ Normally used via set_secure_cookie, but provided as a separate
+ method for non-cookie uses. To decode a value not stored
+ as a cookie use the optional value argument to get_secure_cookie.
+ """
+ self.require_setting("cookie_secret", "secure cookies")
+ return create_signed_value(self.application.settings["cookie_secret"],
+ name, value)
+
+ def get_secure_cookie(self, name, value=None, max_age_days=31):
+ """Returns the given signed cookie if it validates, or None."""
+ self.require_setting("cookie_secret", "secure cookies")
+ if value is None: value = self.get_cookie(name)
+ return decode_signed_value(self.application.settings["cookie_secret"],
+ name, value, max_age_days=max_age_days)
+
+ def redirect(self, url, permanent=False, status=None):
+ """Sends a redirect to the given (optionally relative) URL.
+
+ If the ``status`` argument is specified, that value is used as the
+ HTTP status code; otherwise either 301 (permanent) or 302
+ (temporary) is chosen based on the ``permanent`` argument.
+ The default is 302 (temporary).
+ """
+ if self._headers_written:
+ raise Exception("Cannot redirect after headers have been written")
+ if status is None:
+ status = 301 if permanent else 302
+ else:
+ assert isinstance(status, int) and 300 <= status <= 399
+ self.set_status(status)
+ # Remove whitespace
+ url = re.sub(b(r"[\x00-\x20]+"), "", utf8(url))
+ self.set_header("Location", urlparse.urljoin(utf8(self.request.uri),
+ url))
+ self.finish()
+
+ def write(self, chunk):
+ """Writes the given chunk to the output buffer.
+
+ To write the output to the network, use the flush() method below.
+
+ If the given chunk is a dictionary, we write it as JSON and set
+ the Content-Type of the response to be application/json.
+ (if you want to send JSON as a different Content-Type, call
+ set_header *after* calling write()).
+
+ Note that lists are not converted to JSON because of a potential
+ cross-site security vulnerability. All JSON output should be
+ wrapped in a dictionary. More details at
+ http://haacked.com/archive/2008/11/20/anatomy-of-a-subtle-json-vulnerability.aspx
+ """
+ if self._finished:
+ raise RuntimeError("Cannot write() after finish(). May be caused "
+ "by using async operations without the "
+ "@asynchronous decorator.")
+ if isinstance(chunk, dict):
+ chunk = escape.json_encode(chunk)
+ self.set_header("Content-Type", "application/json; charset=UTF-8")
+ chunk = utf8(chunk)
+ self._write_buffer.append(chunk)
+
+ def render(self, template_name, **kwargs):
+ """Renders the template with the given arguments as the response."""
+ html = self.render_string(template_name, **kwargs)
+
+ # Insert the additional JS and CSS added by the modules on the page
+ js_embed = []
+ js_files = []
+ css_embed = []
+ css_files = []
+ html_heads = []
+ html_bodies = []
+ for module in getattr(self, "_active_modules", {}).itervalues():
+ embed_part = module.embedded_javascript()
+ if embed_part: js_embed.append(utf8(embed_part))
+ file_part = module.javascript_files()
+ if file_part:
+ if isinstance(file_part, (unicode, bytes_type)):
+ js_files.append(file_part)
+ else:
+ js_files.extend(file_part)
+ embed_part = module.embedded_css()
+ if embed_part: css_embed.append(utf8(embed_part))
+ file_part = module.css_files()
+ if file_part:
+ if isinstance(file_part, (unicode, bytes_type)):
+ css_files.append(file_part)
+ else:
+ css_files.extend(file_part)
+ head_part = module.html_head()
+ if head_part: html_heads.append(utf8(head_part))
+ body_part = module.html_body()
+ if body_part: html_bodies.append(utf8(body_part))
+ def is_absolute(path):
+ return any(path.startswith(x) for x in ["/", "http:", "https:"])
+ if js_files:
+ # Maintain order of JavaScript files given by modules
+ paths = []
+ unique_paths = set()
+ for path in js_files:
+ if not is_absolute(path):
+ path = self.static_url(path)
+ if path not in unique_paths:
+ paths.append(path)
+ unique_paths.add(path)
+        js = ''.join('<script src="' + escape.xhtml_escape(p) +
+                     '" type="text/javascript"></script>'
+                     for p in paths)
+ sloc = html.rindex(b('